EqualityVerifier

Verifies whether objects/variables are equal to an expected value


Class: TestFuseDFS

UtilityVerifier EqualityVerifier HybridVerifier 
/** Test random access to a file mounted through fuse-dfs. */
@Test
public void testRandomAccess() throws IOException {
  final String contents = "hello world";
  File f = new File(mountPoint, "file1");
  createFile(f, contents);

  // Seek to EOF and append: fuse-dfs reports "Operation not supported".
  // NOTE(review): unlike the overwrite case below there is no fail() after
  // this write, so a successful append would pass silently — confirm intended.
  RandomAccessFile raf = new RandomAccessFile(f, "rw");
  raf.seek(f.length());
  try {
    raf.write('b');
  } catch (IOException e) {
    assertEquals("Operation not supported", e.getMessage());
  } finally {
    raf.close();
  }

  // Overwriting existing bytes must be rejected with EINVAL.
  raf = new RandomAccessFile(f, "rw");
  raf.seek(0);
  try {
    raf.write('b');
    fail("Over-wrote existing bytes");
  } catch (IOException e) {
    assertEquals("Invalid argument", e.getMessage());
  } finally {
    raf.close();
  }

  execAssertSucceeds("rm " + f.getAbsolutePath());
}

EqualityVerifier 
/** Test copying a set of files from the mount to itself. */
@Test
public void testCopyFiles() throws IOException {
  final String contents = "hello world";
  File srcDir = new File(mountPoint, "dir1");
  File dstDir = new File(mountPoint, "dir2");
  execAssertSucceeds("mkdir " + srcDir.getAbsolutePath());

  // Populate the source directory with five small files.
  for (int i = 0; i < 5; i++) {
    createFile(new File(srcDir, "file" + i), contents);
  }
  assertEquals(5, srcDir.listFiles().length);

  // A recursive copy within the mount must preserve the file count.
  execAssertSucceeds("cp -r " + srcDir.getAbsolutePath() + " " + dstDir.getAbsolutePath());
  assertEquals(5, dstDir.listFiles().length);

  // Both trees must remain traversable; then clean up.
  execAssertSucceeds("find " + srcDir.getAbsolutePath());
  execAssertSucceeds("find " + dstDir.getAbsolutePath());
  execAssertSucceeds("rm -r " + srcDir.getAbsolutePath());
  execAssertSucceeds("rm -r " + dstDir.getAbsolutePath());
}

Class: org.apache.hadoop.TestGenericRefresh

InternalCallVerifier EqualityVerifier 
/**
 * When several handlers share one identifier and return different non-zero
 * codes, DFSAdmin merges the results into a -1 exit code.
 */
@Test
public void testMultipleReturnCodeMerging() throws Exception {
  RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
  Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .toReturn(new RefreshResponse(23, "Twenty Three"));
  RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
  Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .toReturn(new RefreshResponse(10, "Ten"));

  // Register both handlers under the same identifier.
  RefreshRegistry.defaultRegistry().register("shared", handlerOne);
  RefreshRegistry.defaultRegistry().register("shared", handlerTwo);

  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "shared"};
  int exitCode = admin.run(args);

  // Mixed return codes collapse to -1, and each handler ran exactly once.
  assertEquals(-1, exitCode);
  Mockito.verify(handlerOne).handleRefresh("shared", new String[] {});
  Mockito.verify(handlerTwo).handleRefresh("shared", new String[] {});
  RefreshRegistry.defaultRegistry().unregisterAll("shared");
}

InternalCallVerifier EqualityVerifier 
/** Refreshing an identifier whose handlers were unregistered must fail. */
@Test
public void testUnregistration() throws Exception {
  RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
  DFSAdmin admin = new DFSAdmin(config);
  String[] args =
      new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "firstHandler"};
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should return -1", -1, exitCode);
}

InternalCallVerifier EqualityVerifier 
/** Two handlers registered under one identifier both receive the refresh. */
@Test
public void testMultipleRegistration() throws Exception {
  RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
  RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);

  DFSAdmin admin = new DFSAdmin(config);
  String[] args =
      new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "sharedId", "one"};
  int exitCode = admin.run(args);

  // Multiple handlers produce a merged -1 exit code; both must have been
  // invoked with the identifier and the extra argument.
  assertEquals(-1, exitCode);
  Mockito.verify(firstHandler).handleRefresh("sharedId", new String[] {"one"});
  Mockito.verify(secondHandler).handleRefresh("sharedId", new String[] {"one"});
  RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
}

InternalCallVerifier EqualityVerifier 
/** Handlers that throw are reported as a normal error (-1), not a crash. */
@Test
public void testExceptionResultsInNormalError() throws Exception {
  RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
  RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.stub(
          otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
      .toThrow(new RuntimeException("More Exceptions"));

  RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
  RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);

  DFSAdmin admin = new DFSAdmin(config);
  String[] args =
      new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "exceptional"};
  int exitCode = admin.run(args);

  // Exceptions surface as a plain error exit code; both handlers were called.
  assertEquals(-1, exitCode);
  Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[] {});
  Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[] {});
  RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}

InternalCallVerifier EqualityVerifier 
/** Refreshing an identifier nothing is registered under must fail. */
@Test
public void testInvalidIdentifier() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args =
      new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "unregisteredIdentity"};
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
}

InternalCallVerifier EqualityVerifier 
/** A -refresh invocation with too few arguments must fail. */
@Test
public void testInvalidCommand() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] {"-refresh", "nn"};
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
}

InternalCallVerifier EqualityVerifier 
/** Extra arguments after the identifier are passed through to the handler. */
@Test
public void testVariableArgs() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args =
      new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "secondHandler", "one"};
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should return 2", 2, exitCode);

  exitCode = admin.run(
      new String[] {
        "-refresh", "localhost:" + cluster.getNameNodePort(), "secondHandler", "one", "two"
      });
  assertEquals("DFSAdmin should now return 3", 3, exitCode);

  // The handler must have seen each argument list exactly as given.
  Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[] {"one"});
  Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[] {"one", "two"});
}

InternalCallVerifier EqualityVerifier 
/** A refresh of a registered identifier succeeds and only its handler runs. */
@Test
public void testValidIdentifier() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args =
      new String[] {"-refresh", "localhost:" + cluster.getNameNodePort(), "firstHandler"};
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should succeed", 0, exitCode);

  Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[] {});
  // The other registered handler must not have been touched at all.
  Mockito.verify(secondHandler, Mockito.never())
      .handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
}

Class: org.apache.hadoop.TestRefreshCallQueue

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** -refreshCallQueue swaps the mock queue out for the configured default. */
@Test
public void testRefresh() throws Exception {
  assertTrue("Mock queue should have been constructed", mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
  int constructionsBeforeRefresh = mockQueueConstructions;

  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] {"-refreshCallQueue"};
  int exitCode = admin.run(args);
  assertEquals("DFSAdmin should return 0", 0, exitCode);

  // The refresh must not have constructed another MockQueue...
  assertEquals(
      "Mock queue should have no additional constructions",
      constructionsBeforeRefresh,
      mockQueueConstructions);
  // ...and puts must now bypass it entirely.
  try {
    assertFalse("Puts are routed through LBQ instead of MockQueue", canPutInMockQueue());
  } catch (IOException ioe) {
    fail("Could not put into queue at all");
  }
}

Class: org.apache.hadoop.conf.TestConfServlet

UtilityVerifier EqualityVerifier HybridVerifier 
/** An unknown format must raise BadFormatException and write no output. */
@Test
public void testBadFormat() throws Exception {
  StringWriter sw = new StringWriter();
  try {
    ConfServlet.writeResponse(getTestConf(), sw, "not a format");
    fail("writeResponse with bad format didn't throw!");
  } catch (ConfServlet.BadFormatException bfe) {
    // Expected: the bad format is rejected before anything is written.
  }
  assertEquals("", sw.toString());
}

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** The XML rendering of the configuration must contain the test property. */
@Test
public void testWriteXml() throws Exception {
  StringWriter sw = new StringWriter();
  ConfServlet.writeResponse(getTestConf(), sw, "xml");
  String xml = sw.toString();

  // Parse the servlet output back into a DOM tree.
  DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance();
  DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
  Document doc = builder.parse(new InputSource(new StringReader(xml)));

  // Scan every <name> element for the test key; when found, check the
  // sibling <value> inside the same <property> element.
  NodeList nameNodes = doc.getElementsByTagName("name");
  boolean foundSetting = false;
  for (int i = 0; i < nameNodes.getLength(); i++) {
    Node nameNode = nameNodes.item(i);
    String key = nameNode.getTextContent();
    System.err.println("xml key: " + key);
    if (TEST_KEY.equals(key)) {
      foundSetting = true;
      Element propertyElem = (Element) nameNode.getParentNode();
      String val = propertyElem.getElementsByTagName("value").item(0).getTextContent();
      assertEquals(TEST_VAL, val);
    }
  }
  assertTrue(foundSetting);
}

Class: org.apache.hadoop.conf.TestConfigurationDeprecation

EqualityVerifier 
/** Touching only a deprecated key's replacement must not log a warning. */
@Test
public void testNoFalseDeprecationWarning() throws IOException {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("AA", "BB");
  conf.set("BB", "bb");
  conf.get("BB");
  conf.writeXml(new ByteArrayOutputStream());
  // Reading/writing the new key "BB" must not flag "AA" as warned...
  assertEquals(false, Configuration.hasWarnedDeprecation("AA"));
  // ...but setting the deprecated key itself must.
  conf.set("AA", "aa");
  assertEquals(true, Configuration.hasWarnedDeprecation("AA"));
}

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Deprecated and new keys stay in sync, and both appear when iterating. */
@Test
public void testIteratorWithDeprecatedKeys() {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[] {"nK"});
  conf.set("k", "v");
  conf.set("dK", "V");
  // Writing through the deprecated key is visible through the new one...
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK"));
  // ...and vice versa.
  conf.set("nK", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK"));

  boolean kFound = false;
  boolean dKFound = false;
  boolean nKFound = false;
  // Fix: iterate with the parameterized entry type instead of the raw
  // Map.Entry, so entry values are typed and no raw-type warning is emitted.
  for (Map.Entry<String, String> entry : conf) {
    if (entry.getKey().equals("k")) {
      assertEquals("v", entry.getValue());
      kFound = true;
    }
    if (entry.getKey().equals("dK")) {
      assertEquals("VV", entry.getValue());
      dKFound = true;
    }
    if (entry.getKey().equals("nK")) {
      assertEquals("VV", entry.getValue());
      nKFound = true;
    }
  }
  assertTrue("regular Key not found", kFound);
  assertTrue("deprecated Key not found", dKFound);
  assertTrue("new Key not found", nKFound);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * Run a set of threads making changes to the deprecations * concurrently with another set of threads calling get() * and set() on Configuration objects. */ @SuppressWarnings("deprecation") @Test(timeout=60000) public void testConcurrentDeprecateAndManipulate() throws Exception { final int NUM_THREAD_IDS=10; final int NUM_KEYS_PER_THREAD=1000; ScheduledThreadPoolExecutor executor=new ScheduledThreadPoolExecutor(2 * NUM_THREAD_IDS,new ThreadFactoryBuilder().setDaemon(true).setNameFormat("testConcurrentDeprecateAndManipulate modification thread %d").build()); final CountDownLatch latch=new CountDownLatch(1); final AtomicInteger highestModificationThreadId=new AtomicInteger(1); List> futures=new LinkedList>(); for (int i=0; i < NUM_THREAD_IDS; i++) { futures.add(executor.schedule(new Callable(){ @Override public Void call() throws Exception { latch.await(); int threadIndex=highestModificationThreadId.addAndGet(1); for (int i=0; i < NUM_KEYS_PER_THREAD; i++) { String testKey=getTestKeyName(threadIndex,i); String testNewKey=testKey + ".new"; Configuration.addDeprecations(new DeprecationDelta[]{new DeprecationDelta(testKey,testNewKey)}); } return null; } } ,0,TimeUnit.SECONDS)); } final AtomicInteger highestAccessThreadId=new AtomicInteger(1); for (int i=0; i < NUM_THREAD_IDS; i++) { futures.add(executor.schedule(new Callable(){ @Override public Void call() throws Exception { Configuration conf=new Configuration(); latch.await(); int threadIndex=highestAccessThreadId.addAndGet(1); for (int i=0; i < NUM_KEYS_PER_THREAD; i++) { String testNewKey=getTestKeyName(threadIndex,i) + ".new"; String value="value." + threadIndex + "."+ i; conf.set(testNewKey,value); Assert.assertEquals(value,conf.get(testNewKey)); } return null; } } ,0,TimeUnit.SECONDS)); } latch.countDown(); for ( Future future : futures) { Uninterruptibles.getUninterruptibly(future); } }

InternalCallVerifier EqualityVerifier 
/** A value set before the deprecation is declared is visible via the new key. */
@Test
public void testSetBeforeAndGetAfterDeprecation() {
  Configuration conf = new Configuration();
  conf.set("oldkey", "hello");
  Configuration.addDeprecation("oldkey", new String[] {"newkey"});
  assertEquals("hello", conf.get("newkey"));
}

InternalCallVerifier EqualityVerifier 
/**
 * A value set under a deprecated key before the deprecation is declared is
 * readable via the new key, for keys backed by default resources.
 */
@Test
public void testSetBeforeAndGetAfterDeprecationAndDefaults() {
  Configuration conf = new Configuration();
  conf.set("tests.fake-default.old-key", "hello");
  Configuration.addDeprecation(
      "tests.fake-default.old-key", new String[] {"tests.fake-default.new-key"});
  assertEquals("hello", conf.get("tests.fake-default.new-key"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Ensure the correctness of loading keys that are marked final and are
 * involved in a deprecation mapping: final values must survive later
 * resources, and values must mirror across deprecated/new key pairs.
 */
@Test
public void testDeprecationForFinalParameters() throws IOException {
  addDeprecationToConfiguration();

  // First resource: A and H are final; J is final but empty.
  out = new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  appendProperty("A", "a", true);
  appendProperty("D", "d");
  appendProperty("E", "e");
  appendProperty("H", "h", true);
  appendProperty("J", "", true);
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("d", conf.get("C"));
  assertEquals("d", conf.get("D"));
  assertEquals("e", conf.get("E"));
  assertEquals("e", conf.get("F"));
  assertEquals("h", conf.get("G"));
  assertEquals("h", conf.get("H"));
  assertNull(conf.get("I"));
  assertNull(conf.get("J"));

  // Second resource: C and F are final; non-final values propagate through
  // the deprecation pairs while final values from the first resource hold.
  out = new BufferedWriter(new FileWriter(CONFIG2));
  startConfig();
  appendProperty("B", "b");
  appendProperty("C", "c", true);
  appendProperty("F", "f", true);
  appendProperty("G", "g");
  appendProperty("I", "i");
  endConfig();
  Path fileResource1 = new Path(CONFIG2);
  conf.addResource(fileResource1);
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("c", conf.get("C"));
  assertEquals("c", conf.get("D"));
  assertEquals("f", conf.get("E"));
  assertEquals("f", conf.get("F"));
  assertEquals("h", conf.get("G"));
  assertEquals("h", conf.get("H"));
  assertNull(conf.get("I"));
  assertNull(conf.get("J"));

  // Third config file is written, then the FIRST resource is re-added and
  // all effective values must be unchanged.
  // NOTE(review): CONFIG3 is written but never added as a resource — the
  // code re-adds CONFIG instead; confirm this is intentional.
  out = new BufferedWriter(new FileWriter(CONFIG3));
  startConfig();
  appendProperty("A", "a1");
  appendProperty("B", "b1");
  appendProperty("C", "c1");
  appendProperty("D", "d1");
  appendProperty("E", "e1");
  appendProperty("F", "f1");
  appendProperty("G", "g1");
  appendProperty("H", "h1");
  appendProperty("I", "i1");
  appendProperty("J", "j1");
  endConfig();
  fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("c", conf.get("C"));
  assertEquals("c", conf.get("D"));
  assertEquals("f", conf.get("E"));
  assertEquals("f", conf.get("F"));
  assertEquals("h", conf.get("G"));
  assertEquals("h", conf.get("H"));
  assertNull(conf.get("I"));
  assertNull(conf.get("J"));
}

InternalCallVerifier EqualityVerifier 
/**
 * Check the propagation of values between deprecated keys and their
 * replacements as resources are loaded and properties are set.
 */
@Test
public void testDeprecation() throws IOException {
  addDeprecationToConfiguration();
  out = new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  appendProperty("A", "a");
  appendProperty("D", "d");
  appendProperty("P", "p");
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  // P maps onto Q and R; A/B and C/D are deprecation pairs.
  assertEquals("p", conf.get("P"));
  assertEquals("p", conf.get("Q"));
  assertEquals("p", conf.get("R"));
  assertEquals("a", conf.get("A"));
  assertEquals("a", conf.get("B"));
  assertEquals("d", conf.get("C"));
  assertEquals("d", conf.get("D"));

  // A later resource setting the partner keys overrides both sides.
  out = new BufferedWriter(new FileWriter(CONFIG2));
  startConfig();
  appendProperty("B", "b");
  appendProperty("C", "c");
  endConfig();
  Path fileResource1 = new Path(CONFIG2);
  conf.addResource(fileResource1);
  assertEquals("b", conf.get("A"));
  assertEquals("b", conf.get("B"));
  assertEquals("c", conf.get("C"));
  assertEquals("c", conf.get("D"));

  // Programmatic set() mirrors across the M/N pair in both directions.
  conf.set("N", "n");
  assertEquals("n", conf.get("M"));
  assertEquals(conf.get("M"), conf.get("N"));
  conf.set("M", "m");
  assertEquals("m", conf.get("N"));

  // X maps onto both Y and Z; the last write to any of them wins.
  conf.set("X", "x");
  assertEquals("x", conf.get("X"));
  assertEquals("x", conf.get("Y"));
  assertEquals("x", conf.get("Z"));
  conf.set("Y", "y");
  conf.set("Z", "z");
  assertEquals("z", conf.get("X"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** unset() through either side of a deprecation pair clears both keys. */
@Test
public void testUnsetWithDeprecatedKeys() {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[] {"nK"});
  conf.set("nK", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK"));

  // Unsetting the deprecated key clears the new key too.
  conf.unset("dK");
  assertNull(conf.get("dK"));
  assertNull(conf.get("nK"));

  conf.set("nK", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK"));

  // And unsetting the new key clears the deprecated alias as well.
  conf.unset("nK");
  assertNull(conf.get("dK"));
  assertNull(conf.get("nK"));
}

InternalCallVerifier EqualityVerifier 
/** set()/unset() interplay for X and Y, both deprecated in favor of Z. */
@Test
public void testDeprecationSetUnset() throws IOException {
  addDeprecationToConfiguration();
  Configuration conf = new Configuration();
  // Both X and Y map onto Z; the most recent write wins.
  conf.set("Y", "y");
  assertEquals("y", conf.get("Z"));
  conf.set("X", "x");
  assertEquals("x", conf.get("Z"));
  conf.unset("Y");
  // Fix: use assertNull instead of assertEquals(null, ...), matching the
  // other tests in this class and giving a clearer failure message.
  assertNull(conf.get("Z"));
  assertNull(conf.get("X"));
}

Class: org.apache.hadoop.conf.TestDeprecatedKeys

BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A deprecated key mapped to several new keys keeps all of them in sync. */
@Test
public void testIteratorWithDeprecatedKeysMappedToMultipleNewKeys() {
  Configuration conf = new Configuration();
  Configuration.addDeprecation("dK", new String[] {"nK1", "nK2"});
  conf.set("k", "v");
  // A write through any of the three aliases is visible through all of them.
  conf.set("dK", "V");
  assertEquals("V", conf.get("dK"));
  assertEquals("V", conf.get("nK1"));
  assertEquals("V", conf.get("nK2"));
  conf.set("nK1", "VV");
  assertEquals("VV", conf.get("dK"));
  assertEquals("VV", conf.get("nK1"));
  assertEquals("VV", conf.get("nK2"));
  conf.set("nK2", "VVV");
  assertEquals("VVV", conf.get("dK"));
  assertEquals("VVV", conf.get("nK2"));
  assertEquals("VVV", conf.get("nK1"));

  boolean kFound = false;
  boolean dKFound = false;
  boolean nK1Found = false;
  boolean nK2Found = false;
  // Fix: iterate with the parameterized entry type instead of the raw
  // Map.Entry, avoiding untyped access to the values.
  for (Map.Entry<String, String> entry : conf) {
    if (entry.getKey().equals("k")) {
      assertEquals("v", entry.getValue());
      kFound = true;
    }
    if (entry.getKey().equals("dK")) {
      assertEquals("VVV", entry.getValue());
      dKFound = true;
    }
    if (entry.getKey().equals("nK1")) {
      assertEquals("VVV", entry.getValue());
      nK1Found = true;
    }
    if (entry.getKey().equals("nK2")) {
      assertEquals("VVV", entry.getValue());
      nK2Found = true;
    }
  }
  assertTrue("regular Key not found", kFound);
  assertTrue("deprecated Key not found", dKFound);
  assertTrue("new Key 1 not found", nK1Found);
  assertTrue("new Key 2 not found", nK2Found);
}

Class: org.apache.hadoop.conf.TestJobConf

InternalCallVerifier EqualityVerifier 
/** Negative values for the new memory keys are passed through untouched. */
@Test
public void testNegativeValuesForMemoryParams() {
  JobConf configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "-5");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-6");
  Assert.assertEquals(-5, configuration.getMemoryForMapTask());
  Assert.assertEquals(-6, configuration.getMemoryForReduceTask());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test the deprecated accessor and mutator for mapred.task.maxvmem.
 * Fix: assertEquals takes (expected, actual); the original passed the
 * arguments reversed, which produces misleading failure messages.
 */
@Test
public void testMaxVirtualMemoryForTask() {
  // Only the map memory is set: maxvmem derives from it (MB -> bytes).
  JobConf configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
  Assert.assertEquals(300 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());

  // Only the reduce memory is set: maxvmem derives from it instead.
  configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(200));
  Assert.assertEquals(200 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());

  // With both per-task keys disabled, the explicit maxvmem value wins.
  configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
  configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
  Assert.assertEquals(1 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());

  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
  Assert.assertEquals(1 * 1024 * 1024, configuration.getMaxVirtualMemoryForTask());

  // The deprecated setter back-fills both per-task memory values (in MB).
  configuration = new JobConf();
  configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());

  // ...even when those values were previously set explicitly.
  configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(400));
  configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
}

InternalCallVerifier EqualityVerifier 
/**
 * A negative MAPRED_TASK_MAXVMEM_PROPERTY must fall back to the defaults of
 * the new configuration keys; explicit new-key values then take precedence.
 */
@Test
public void testNegativeValueForTaskVmem() {
  JobConf configuration = new JobConf();
  configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3");
  Assert.assertEquals(MRJobConfig.DEFAULT_MAP_MEMORY_MB, configuration.getMemoryForMapTask());
  Assert.assertEquals(
      MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, configuration.getMemoryForReduceTask());

  configuration.set(MRJobConfig.MAP_MEMORY_MB, "4");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "5");
  Assert.assertEquals(4, configuration.getMemoryForMapTask());
  Assert.assertEquals(5, configuration.getMemoryForReduceTask());
}

InternalCallVerifier EqualityVerifier 
/** setProfileParams must store its value under TASK_PROFILE_PARAMS. */
@Test
public void testProfileParamsSetter() {
  JobConf configuration = new JobConf();
  configuration.setProfileParams("test");
  Assert.assertEquals("test", configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
}

InternalCallVerifier EqualityVerifier 
/** getProfileParams must read the value stored under TASK_PROFILE_PARAMS. */
@Test
public void testProfileParamsGetter() {
  JobConf configuration = new JobConf();
  configuration.set(MRJobConfig.TASK_PROFILE_PARAMS, "test");
  Assert.assertEquals("test", configuration.getProfileParams());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test mapred.task.maxvmem interaction with the new per-task memory keys.
 * Fix: assertEquals takes (expected, actual); the original passed the
 * arguments reversed, which produces misleading failure messages.
 */
@Test
public void testMemoryConfigForMapOrReduceTask() {
  JobConf configuration = new JobConf();
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(300));
  Assert.assertEquals(300, configuration.getMemoryForMapTask());
  Assert.assertEquals(300, configuration.getMemoryForReduceTask());

  // An explicit maxvmem (bytes) overrides the per-task MB settings.
  configuration.set("mapred.task.maxvmem", String.valueOf(2 * 1024 * 1024));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(300));
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());

  // A disabled (-1) maxvmem leaves the per-task settings in force.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", "-1");
  configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(400));
  Assert.assertEquals(300, configuration.getMemoryForMapTask());
  Assert.assertEquals(400, configuration.getMemoryForReduceTask());

  // maxvmem alone drives both values when the per-task keys are -1.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(2 * 1024 * 1024));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "-1");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-1");
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());

  // Everything disabled yields -1 for both tasks.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(-1));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "-1");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-1");
  Assert.assertEquals(-1, configuration.getMemoryForMapTask());
  Assert.assertEquals(-1, configuration.getMemoryForReduceTask());

  // maxvmem also overrides small explicit per-task values.
  configuration = new JobConf();
  configuration.set("mapred.task.maxvmem", String.valueOf(2 * 1024 * 1024));
  configuration.set(MRJobConfig.MAP_MEMORY_MB, "3");
  configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "3");
  Assert.assertEquals(2, configuration.getMemoryForMapTask());
  Assert.assertEquals(2, configuration.getMemoryForReduceTask());
}

Class: org.apache.hadoop.contrib.bkjournal.TestBookKeeperAsHASharedDir

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test HA failover, where BK, as the shared storage, fails.
 * Once it becomes available again, a standby can come up.
 * Verify that any write happening after the BK fail is not
 * available on the standby.
 */
@Test
public void testFailoverWithFailingBKCluster() throws Exception {
  int ensembleSize = numBookies + 1;
  BookieServer newBookie = bkutil.newBookie();
  assertEquals("New bookie didn't start", ensembleSize,
      bkutil.checkBookiesUp(ensembleSize, 10));
  BookieServer replacementBookie = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        BKJMUtil.createJournalURI("/hotfailoverWithFail").toString());
    // Require the full ensemble for the quorum, so losing a single bookie
    // makes the shared journal unwritable.
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
        ensembleSize);
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
        ensembleSize);
    BKJMUtil.addJournalManagerDefinition(conf);
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .manageNameDfsSharedDirs(false)
        .checkExitOnShutdown(false)
        .build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);
    Path p1 = new Path("/testBKJMFailingBKCluster1");
    Path p2 = new Path("/testBKJMFailingBKCluster2");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    // First write succeeds while the full ensemble is up.
    fs.mkdirs(p1);
    newBookie.shutdown();
    assertEquals("New bookie didn't stop", numBookies,
        bkutil.checkBookiesUp(numBookies, 10));
    // With the ensemble short one bookie, the active NN cannot journal
    // the edit and is expected to exit.
    try {
      fs.mkdirs(p2);
      fail("mkdirs should result in the NN exiting");
    } catch (RemoteException re) {
      assertTrue(re.getClassName().contains("ExitException"));
    }
    cluster.shutdownNameNode(0);
    // The standby cannot take over while the journal is still unwritable.
    try {
      cluster.transitionToActive(1);
      fail("Shouldn't have been able to transition with bookies down");
    } catch (ExitException ee) {
      assertTrue("Should shutdown due to required journal failure",
          ee.getMessage().contains(
              "starting log segment 3 failed for required journal"));
    }
    // Once a replacement bookie restores the ensemble, failover succeeds
    // and only the pre-failure write is visible on the new active.
    replacementBookie = bkutil.newBookie();
    assertEquals("Replacement bookie didn't start", ensembleSize,
        bkutil.checkBookiesUp(ensembleSize, 10));
    cluster.transitionToActive(1);
    assertTrue(fs.exists(p1));
    assertFalse(fs.exists(p2));
  } finally {
    newBookie.shutdown();
    if (replacementBookie != null) {
      replacementBookie.shutdown();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.contrib.bkjournal.TestBookKeeperJournalManager

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that if enough bookies fail to prevent an ensemble,
 * writes the bookkeeper will fail. Test that when once again
 * an ensemble is available, it can continue to write.
 */
@Test
public void testAllBookieFailure() throws Exception {
  BookieServer bookieToFail = bkutil.newBookie();
  BookieServer replacementBookie = null;
  try {
    int ensembleSize = numBookies + 1;
    assertEquals("New bookie didn't start", ensembleSize,
        bkutil.checkBookiesUp(ensembleSize, 10));
    // Require every bookie for the quorum so one failure blocks writes.
    Configuration conf = new Configuration();
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
        ensembleSize);
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
        ensembleSize);
    long txid = 1;
    NamespaceInfo nsi = newNSInfo();
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"), nsi);
    bkjm.format(nsi);
    EditLogOutputStream out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    // Writes succeed while the full ensemble is available.
    for (long i = 1; i <= 3; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
    bookieToFail.shutdown();
    assertEquals("New bookie didn't die", numBookies,
        bkutil.checkBookiesUp(numBookies, 10));
    // With a bookie down, flushing further writes must fail.
    try {
      for (long i = 1; i <= 3; i++) {
        FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
        op.setTransactionId(txid++);
        out.write(op);
      }
      out.setReadyToFlush();
      out.flush();
      fail("should not get to this stage");
    } catch (IOException ioe) {
      LOG.debug("Error writing to bookkeeper", ioe);
      assertTrue("Invalid exception message",
          ioe.getMessage().contains("Failed to write to bookkeeper"));
    }
    // Restoring the ensemble allows recovery and a fresh segment to be
    // written successfully.
    replacementBookie = bkutil.newBookie();
    assertEquals("New bookie didn't start", numBookies + 1,
        bkutil.checkBookiesUp(numBookies + 1, 10));
    bkjm.recoverUnfinalizedSegments();
    out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long i = 1; i <= 3; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
  } catch (Exception e) {
    LOG.error("Exception in test", e);
    throw e;
  } finally {
    if (replacementBookie != null) {
      replacementBookie.shutdown();
    }
    bookieToFail.shutdown();
    if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
      LOG.warn("Not all bookies from this test shut down, expect errors");
    }
  }
}

IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Transaction counting must include a trailing in-progress segment. */
@Test
public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(
      conf, BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi);
  bkjm.format(nsi);

  // Write and finalize three full segments.
  long txid = 1;
  for (long i = 0; i < 3; i++) {
    long start = txid;
    EditLogOutputStream out =
        bkjm.startLogSegment(start, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, (txid - 1));
    assertNotNull(zkc.exists(bkjm.finalizedLedgerZNode(start, (txid - 1)), false));
  }

  // Leave a half-written segment in progress (aborted, never finalized).
  long start = txid;
  EditLogOutputStream out =
      bkjm.startLogSegment(start, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long j = 1; j <= DEFAULT_SEGMENT_SIZE / 2; j++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(txid++);
    out.write(op);
  }
  out.setReadyToFlush();
  out.flush();
  out.abort();
  out.close();

  // With inProgressOk=true the count covers the in-progress txns as well.
  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals((txid - 1), numTrans);
}

InternalCallVerifier EqualityVerifier 
/** A single finalized segment of 100 ops reports exactly 100 transactions. */
@Test
public void testNumberOfTransactions() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(
      conf, BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
  bkjm.format(nsi);

  EditLogOutputStream out =
      bkjm.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= 100; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, 100);

  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals(100, numTrans);
}

IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies transaction counting when a finalized middle segment has been
 * deleted, leaving a gap: counting stops at the gap, reading from inside
 * the gap raises CorruptionException, and counting past the gap works.
 */
@Test
public void testNumberOfTransactionsWithGaps() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
  bkjm.format(nsi);

  long txid = 1;
  // Write and finalize three full segments.
  for (long segment = 0; segment < 3; segment++) {
    long start = txid;
    EditLogOutputStream out = bkjm.startLogSegment(start,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.close();
    bkjm.finalizeLogSegment(start, txid - 1);
    assertNotNull(
        zkc.exists(bkjm.finalizedLedgerZNode(start, txid - 1), false));
  }

  // Delete the middle segment's znode to create the gap.
  zkc.delete(bkjm.finalizedLedgerZNode(DEFAULT_SEGMENT_SIZE + 1,
      DEFAULT_SEGMENT_SIZE * 2), -1);

  long numTrans = bkjm.getNumberOfTransactions(1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);

  try {
    numTrans = bkjm.getNumberOfTransactions(DEFAULT_SEGMENT_SIZE + 1, true);
    fail("Should have thrown corruption exception by this point");
  } catch (JournalManager.CorruptionException ce) {
    // expected: the segment starting here was deleted above
  }

  numTrans = bkjm.getNumberOfTransactions((DEFAULT_SEGMENT_SIZE * 2) + 1, true);
  assertEquals(DEFAULT_SEGMENT_SIZE, numTrans);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests that the edit log file metadata reading from ZooKeeper should be
 * able to handle the NoNodeException. bkjm.getLedgerList(inProgressOk)
 * should suppress the NoNodeException for a concurrently-deleted znode and
 * continue. HDFS-3441.
 */
@Test
public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
  URI uri = BKJMUtil.createJournalURI("/hdfsjournal-editlogfile");
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi);
  bkjm.format(nsi);
  try {
    // Create two finalized segments, then make metadata reads of the
    // second one fail with NoNodeException via a spied ZooKeeper client.
    String zkpath1 = startAndFinalizeLogSegment(bkjm, 1, 50);
    String zkpath2 = startAndFinalizeLogSegment(bkjm, 51, 100);
    ZooKeeper zkspy = spy(BKJMUtil.connectZooKeeper());
    bkjm.setZooKeeper(zkspy);
    Mockito.doThrow(
        new KeeperException.NoNodeException(zkpath2 + " doesn't exists"))
        .when(zkspy).getData(zkpath2, false, null);
    // Typed list (was raw List, which cannot compile against getZkPath()).
    List<EditLogLedgerMetadata> ledgerList = bkjm.getLedgerList(false);
    assertEquals("List contains the metadata of non exists path.",
        1, ledgerList.size());
    assertEquals("LogLedgerMetadata contains wrong zk paths.",
        zkpath1, ledgerList.get(0).getZkPath());
  } finally {
    bkjm.close();
  }
}

EqualityVerifier 
/**
 * Writes 10000 transactions into one finalized segment and verifies a
 * reader stream returns exactly that many transactions.
 */
@Test
public void testSimpleRead() throws Exception {
  NamespaceInfo nsi = newNSInfo();
  BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
      BKJMUtil.createJournalURI("/hdfsjournal-simpleread"), nsi);
  bkjm.format(nsi);

  final long numTransactions = 10000;
  // (A stray empty statement after this call was removed.)
  EditLogOutputStream out = bkjm.startLogSegment(1,
      NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  for (long i = 1; i <= numTransactions; i++) {
    FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
    op.setTransactionId(i);
    out.write(op);
  }
  out.close();
  bkjm.finalizeLogSegment(1, numTransactions);

  // Typed list (was raw List, which cannot compile against the stream API).
  List<EditLogInputStream> in = new ArrayList<EditLogInputStream>();
  bkjm.selectInputStreams(in, 1, true);
  try {
    assertEquals(numTransactions,
        FSEditLogTestUtil.countTransactionsInStream(in.get(0)));
  } finally {
    in.get(0).close();
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Test that a BookKeeper JM can continue to work across the failure of a
 * bookie. This should be handled transparently by bookkeeper.
 */
@Test
public void testOneBookieFailure() throws Exception {
  BookieServer bookieToFail = bkutil.newBookie();
  BookieServer replacementBookie = null;
  try {
    int ensembleSize = numBookies + 1;
    assertEquals("New bookie didn't start",
        ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));

    // Ensemble and quorum span every bookie, so losing one is visible.
    Configuration conf = new Configuration();
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
        ensembleSize);
    conf.setInt(BookKeeperJournalManager.BKJM_BOOKKEEPER_QUORUM_SIZE,
        ensembleSize);

    long txid = 1;
    NamespaceInfo nsi = newNSInfo();
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"), nsi);
    bkjm.format(nsi);

    EditLogOutputStream out = bkjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (long i = 1; i <= 3; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();

    // Bring up a replacement, then kill the original bookie.
    replacementBookie = bkutil.newBookie();
    assertEquals("replacement bookie didn't start",
        ensembleSize + 1, bkutil.checkBookiesUp(ensembleSize + 1, 10));
    bookieToFail.shutdown();
    assertEquals("New bookie didn't die",
        ensembleSize, bkutil.checkBookiesUp(ensembleSize, 10));

    // Writes should keep succeeding after the failover.
    for (long i = 1; i <= 3; i++) {
      FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
      op.setTransactionId(txid++);
      out.write(op);
    }
    out.setReadyToFlush();
    out.flush();
  } catch (Exception e) {
    LOG.error("Exception in test", e);
    throw e;
  } finally {
    if (replacementBookie != null) {
      replacementBookie.shutdown();
    }
    bookieToFail.shutdown();
    if (bkutil.checkBookiesUp(numBookies, 30) != numBookies) {
      LOG.warn("Not all bookies from this test shut down, expect errors");
    }
  }
}

Class: org.apache.hadoop.contrib.bkjournal.TestBootstrapStandbyWithBKJM

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * While bootstrapping, in-progress transaction entries should be skipped.
 * Bootstrap usage for BKJM: "-force", "-nonInteractive",
 * "-skipSharedEditsCheck".
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Make NN0 active and write some namespace state through it.
  cluster.transitionToActive(0);
  Configuration confNN1 = cluster.getConfiguration(1);
  DistributedFileSystem dfs =
      (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, confNN1);
  for (int i = 1; i <= 10; i++) {
    dfs.mkdirs(new Path("/test" + i));
  }
  dfs.close();

  // Shut down NN1 and clear its local edits so it must bootstrap.
  cluster.shutdownNameNode(1);
  deleteEditLogIfExists(confNN1);

  // Checkpoint on the active namenode.
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
  cluster.getNameNodeRpc(0).saveNamespace();
  cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);

  // Without -skipSharedEditsCheck bootstrap fails (rc 6); with it, rc 0.
  int rc = BootstrapStandby.run(
      new String[]{"-force", "-nonInteractive"}, confNN1);
  Assert.assertEquals("Mismatches return code", 6, rc);
  rc = BootstrapStandby.run(
      new String[]{"-force", "-nonInteractive", "-skipSharedEditsCheck"},
      confNN1);
  Assert.assertEquals("Mismatches return code", 0, rc);

  // Restart NN1 as standby and verify it catches up and checkpoints.
  confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
  cluster.restartNameNode(1);
  cluster.transitionToStandby(1);
  NameNode nn0 = cluster.getNameNode(0);
  HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
  long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
      .getFSImage().getMostRecentCheckpointTxId();
  HATestUtil.waitForCheckpoint(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
      ImmutableList.of((int) expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}

Class: org.apache.hadoop.contrib.bkjournal.TestCurrentInprogress

InternalCallVerifier EqualityVerifier 
/**
 * Tests that read() returns the data previously written via update().
 */
@Test
public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
  String data = "inprogressNode";
  CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  ci.init();
  ci.update(data);
  String inprogressNodePath = ci.read();
  assertEquals("Not returning inprogressZnode", "inprogressNode",
      inprogressNodePath);
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests that read() returns null once clear() has removed the stored data.
 */
@Test
public void testReadShouldReturnNullAfterClear() throws Exception {
  CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  ci.init();
  ci.update("myInprogressZnode");
  ci.read();
  ci.clear();
  String inprogressNodePath = ci.read();
  assertEquals("Expecting null to be return", null, inprogressNodePath);
}

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Tests that update() throws IOException if the znode version number was
 * modified between read and update.
 */
@Test(expected = IOException.class)
public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
    throws Exception {
  CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
  ci.init();
  ci.update("myInprogressZnode");
  assertEquals("Not returning myInprogressZnode", "myInprogressZnode",
      ci.read());
  // Bump the znode version behind the last read...
  ci.update("YourInprogressZnode");
  // ...so this stale update must fail with IOException.
  ci.update("myInprogressZnode");
}

Class: org.apache.hadoop.crypto.CryptoStreamsTestBase

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises a mix of plain read, seek, skip, positioned read, and
 * byte-buffer read on one stream, checking position after each step.
 */
@Test(timeout = 120000)
public void testCombinedOp() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  final int len1 = dataLen / 8;
  final int len2 = dataLen / 10;
  InputStream in = getInputStream(defaultBufferSize);

  // Plain read of len1 bytes from the start.
  byte[] readData = new byte[len1];
  readAll(in, readData, 0, len1);
  byte[] expectedData = new byte[len1];
  System.arraycopy(data, 0, expectedData, 0, len1);
  Assert.assertArrayEquals(readData, expectedData);
  long pos = ((Seekable) in).getPos();
  Assert.assertEquals(len1, pos);

  // Seek forward len2, then skip another len2.
  ((Seekable) in).seek(pos + len2);
  long n = in.skip(len2);
  Assert.assertEquals(len2, n);

  // Positioned read must not move the stream position.
  positionedReadCheck(in, dataLen / 4);
  pos = ((Seekable) in).getPos();
  Assert.assertEquals(len1 + len2 + len2, pos);

  // First ByteBuffer read.
  ByteBuffer buf = ByteBuffer.allocate(len1);
  int nRead = ((ByteBufferReadable) in).read(buf);
  readData = new byte[nRead];
  buf.rewind();
  buf.get(readData);
  expectedData = new byte[nRead];
  System.arraycopy(data, (int) pos, expectedData, 0, nRead);
  Assert.assertArrayEquals(readData, expectedData);
  pos = ((Seekable) in).getPos();
  Assert.assertEquals(len1 + 2 * len2 + nRead, pos);

  // Another positioned read, then another plain read.
  positionedReadCheck(in, dataLen / 3);
  readData = new byte[len1];
  readAll(in, readData, 0, len1);
  expectedData = new byte[len1];
  System.arraycopy(data, (int) pos, expectedData, 0, len1);
  Assert.assertArrayEquals(readData, expectedData);
  pos = ((Seekable) in).getPos();
  Assert.assertEquals(2 * len1 + 2 * len2 + nRead, pos);

  // Second ByteBuffer read.
  buf = ByteBuffer.allocate(len1);
  nRead = ((ByteBufferReadable) in).read(buf);
  readData = new byte[nRead];
  buf.rewind();
  buf.get(readData);
  expectedData = new byte[nRead];
  System.arraycopy(data, (int) pos, expectedData, 0, nRead);
  Assert.assertArrayEquals(readData, expectedData);

  // A ByteBuffer read at EOF returns -1.
  ((Seekable) in).seek(dataLen);
  buf.clear();
  n = ((ByteBufferReadable) in).read(buf);
  Assert.assertEquals(n, -1);
  in.close();
}

APIUtilityVerifier EqualityVerifier 
/**
 * Test get position: getPos() tracks the number of bytes read so far.
 */
@Test(timeout = 120000)
public void testGetPos() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(n1, ((Seekable) in).getPos());

  int n2 = readAll(in, result, n1, dataLen - n1);
  Assert.assertEquals(n1 + n2, ((Seekable) in).getPos());
  in.close();
}

APIUtilityVerifier EqualityVerifier 
/**
 * available() should report the bytes remaining after each read.
 */
@Test(timeout = 120000)
public void testAvailable() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(in.available(), dataLen - n1);

  int n2 = readAll(in, result, n1, dataLen - n1);
  Assert.assertEquals(in.available(), dataLen - n1 - n2);
  in.close();
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test skip: skipped plus read bytes must add up to the stream length,
 * negative skips are rejected, and skipping at EOF skips nothing.
 */
@Test(timeout = 120000)
public void testSkip() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(n1, ((Seekable) in).getPos());

  // Skip a third, read the rest, and verify the pieces add up.
  long skipped = in.skip(dataLen / 3);
  int n2 = readAll(in, result, 0, dataLen);
  Assert.assertEquals(dataLen, n1 + skipped + n2);
  byte[] readData = new byte[n2];
  System.arraycopy(result, 0, readData, 0, n2);
  byte[] expectedData = new byte[n2];
  System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
  Assert.assertArrayEquals(readData, expectedData);

  // Negative skip lengths are rejected.
  try {
    skipped = in.skip(-3);
    Assert.fail("Skip Negative length should fail.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Negative skip length", e);
  }

  // Skipping at EOF skips zero bytes.
  skipped = in.skip(3);
  Assert.assertEquals(skipped, 0);
  in.close();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Enhanced byte-buffer reads should return the expected bytes and keep
 * the stream position consistent with ordinary reads.
 */
@Test(timeout = 120000)
public void testHasEnhancedByteBufferAccess() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  final int len1 = dataLen / 8;

  // First zero-copy read.
  ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read(
      getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
  int n1 = buffer.remaining();
  byte[] readData = new byte[n1];
  buffer.get(readData);
  byte[] expectedData = new byte[n1];
  System.arraycopy(data, 0, expectedData, 0, n1);
  Assert.assertArrayEquals(readData, expectedData);
  ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);

  // Ordinary read picks up where the buffer read left off.
  readData = new byte[len1];
  readAll(in, readData, 0, len1);
  expectedData = new byte[len1];
  System.arraycopy(data, n1, expectedData, 0, len1);
  Assert.assertArrayEquals(readData, expectedData);

  // Second zero-copy read.
  buffer = ((HasEnhancedByteBufferAccess) in).read(
      getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
  int n2 = buffer.remaining();
  readData = new byte[n2];
  buffer.get(readData);
  expectedData = new byte[n2];
  System.arraycopy(data, n1 + len1, expectedData, 0, n2);
  Assert.assertArrayEquals(readData, expectedData);
  ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
  in.close();
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test seek to different positions; negative and past-EOF seeks must fail
 * and leave the stream position unchanged.
 */
@Test(timeout = 120000)
public void testSeek() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  seekCheck(in, dataLen / 3);
  seekCheck(in, 0);
  seekCheck(in, dataLen / 2);
  final long pos = ((Seekable) in).getPos();

  // Seeking to a negative offset fails without moving the position.
  try {
    seekCheck(in, -3);
    Assert.fail("Seek to negative offset should fail.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot seek to negative offset", e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());

  // Seeking past EOF fails without moving the position.
  try {
    seekCheck(in, dataLen + 3);
    Assert.fail("Seek after EOF should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());
  in.close();
}

EqualityVerifier 
/**
 * Test read fully: positioned readFully calls must not disturb the
 * sequential read position.
 */
@Test(timeout = 120000)
public void testReadFully() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);

  InputStream in = getInputStream(defaultBufferSize);
  final int len1 = dataLen / 4;

  byte[] readData = new byte[len1];
  readAll(in, readData, 0, len1);
  byte[] expectedData = new byte[len1];
  System.arraycopy(data, 0, expectedData, 0, len1);
  Assert.assertArrayEquals(readData, expectedData);

  readFullyCheck(in, dataLen / 3);
  readData = new byte[len1];
  readAll(in, readData, 0, len1);
  expectedData = new byte[len1];
  System.arraycopy(data, len1, expectedData, 0, len1);
  Assert.assertArrayEquals(readData, expectedData);

  readFullyCheck(in, dataLen / 2);
  readData = new byte[len1];
  readAll(in, readData, 0, len1);
  expectedData = new byte[len1];
  System.arraycopy(data, 2 * len1, expectedData, 0, len1);
  Assert.assertArrayEquals(readData, expectedData);
  in.close();
}

Class: org.apache.hadoop.crypto.TestCryptoCodec

AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Smoke-tests the OpenSSL AES-CTR codec; skipped when the native build
 * lacks openssl support.
 */
@Test(timeout = 1200000)
public void testOpensslAesCtrCryptoCodec() throws Exception {
  Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
  Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
  cryptoCodecTest(conf, seed, 0,
      "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
  cryptoCodecTest(conf, seed, count,
      "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
}

Class: org.apache.hadoop.crypto.key.TestCachingKeyProvider

InternalCallVerifier EqualityVerifier 
/**
 * rollNewVersion() must invalidate the cached current key each time.
 */
@Test
public void testRollNewVersion() throws Exception {
  KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));

  // Each roll flushes the cache entry, forcing another provider hit.
  cache.rollNewVersion("k1");
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));

  cache.rollNewVersion("k1", new byte[0]);
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(3)).getCurrentKey(Mockito.eq("k1"));
}

InternalCallVerifier EqualityVerifier 
/**
 * Metadata lookups are cached until expiry; null results are not cached.
 */
@Test
public void testMetadata() throws Exception {
  KeyProvider.Metadata mockMeta = Mockito.mock(KeyProvider.Metadata.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
  Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

  // A second lookup within the TTL is served from the cache.
  Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
  Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));

  // After the TTL passes, the provider is consulted again.
  Thread.sleep(200);
  Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
  Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k1"));

  // A null result is never cached.
  cache = new CachingKeyProvider(mockProv, 100, 100);
  Assert.assertEquals(null, cache.getMetadata("k2"));
  Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k2"));
  Assert.assertEquals(null, cache.getMetadata("k2"));
  Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k2"));
}

InternalCallVerifier EqualityVerifier 
/**
 * deleteKey() must evict both the current-key and key-version cache
 * entries for the deleted key.
 */
@Test
public void testDeleteKey() throws Exception {
  KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
  Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
  Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
      new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));

  // Deleting the key flushes both caches, forcing fresh provider hits.
  cache.deleteKey("k1");
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
}

InternalCallVerifier EqualityVerifier 
/**
 * Current-key lookups are cached until expiry; null results are not
 * cached.
 */
@Test
public void testCurrentKey() throws Exception {
  KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
  Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

  // A second lookup within the TTL is served from the cache.
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));

  // Well past the TTL, the provider is consulted again.
  Thread.sleep(1200);
  Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
  Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));

  // A null result is never cached.
  cache = new CachingKeyProvider(mockProv, 100, 100);
  Assert.assertEquals(null, cache.getCurrentKey("k2"));
  Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k2"));
  Assert.assertEquals(null, cache.getCurrentKey("k2"));
  Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k2"));
}

InternalCallVerifier EqualityVerifier 
/**
 * Key-version lookups are cached until expiry; null results are not
 * cached.
 */
@Test
public void testKeyVersion() throws Exception {
  KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
  KeyProvider mockProv = Mockito.mock(KeyProvider.class);
  Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
  Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
  KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);

  // A second lookup within the TTL is served from the cache.
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));

  // After the TTL passes, the provider is consulted again.
  Thread.sleep(200);
  Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
  Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));

  // A null result is never cached.
  cache = new CachingKeyProvider(mockProv, 100, 100);
  Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
  Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k2@0"));
  Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
  Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k2@0"));
}

Class: org.apache.hadoop.crypto.key.TestKeyProvider

InternalCallVerifier EqualityVerifier 
/**
 * KeyVersion should expose the version name and material it was built
 * with.
 */
@Test
public void testKeyMaterial() throws Exception {
  byte[] key1 = new byte[]{1, 2, 3, 4};
  KeyProvider.KeyVersion obj =
      new KeyProvider.KeyVersion("key1", "key1@1", key1);
  assertEquals("key1@1", obj.getVersionName());
  assertArrayEquals(new byte[]{1, 2, 3, 4}, obj.getMaterial());
}

EqualityVerifier 
/**
 * buildVersionName should join the base name and version with '@'.
 */
@Test
public void testBuildVersionName() throws Exception {
  assertEquals("/a/b@3", KeyProvider.buildVersionName("/a/b", 3));
  assertEquals("/aaa@12", KeyProvider.buildVersionName("/aaa", 12));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getBaseName should strip the '@version' suffix and reject names that
 * lack one with an IOException.
 */
@Test
public void testParseVersionName() throws Exception {
  assertEquals("/a/b", KeyProvider.getBaseName("/a/b@3"));
  assertEquals("/aaa", KeyProvider.getBaseName("/aaa@112"));
  try {
    KeyProvider.getBaseName("no-slashes");
    // fail() replaces the assertTrue("...", false) anti-idiom.
    fail("should have thrown");
  } catch (IOException e) {
    // expected: a version name must contain '@'
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips KeyProvider.Metadata through serialize()/deserialize, both
 * without and with description/attributes, and checks addVersion().
 */
@Test
public void testMetadata() throws Exception {
  // Metadata with no description or attributes.
  DateFormat format = new SimpleDateFormat("y/m/d");
  Date date = format.parse("2013/12/25");
  KeyProvider.Metadata meta =
      new KeyProvider.Metadata("myCipher", 100, null, null, date, 123);
  assertEquals("myCipher", meta.getCipher());
  assertEquals(100, meta.getBitLength());
  assertNull(meta.getDescription());
  assertEquals(date, meta.getCreated());
  assertEquals(123, meta.getVersions());

  KeyProvider.Metadata second = new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(), second.getCipher());
  assertEquals(meta.getBitLength(), second.getBitLength());
  assertNull(second.getDescription());
  assertTrue(second.getAttributes().isEmpty());
  assertEquals(meta.getCreated(), second.getCreated());
  assertEquals(meta.getVersions(), second.getVersions());

  // addVersion returns the old count and only mutates the copy.
  int newVersion = second.addVersion();
  assertEquals(123, newVersion);
  assertEquals(124, second.getVersions());
  assertEquals(123, meta.getVersions());

  // Metadata with a description and attributes.
  format = new SimpleDateFormat("y/m/d");
  date = format.parse("2013/12/25");
  Map attributes = new HashMap();
  attributes.put("a", "A");
  meta = new KeyProvider.Metadata("myCipher", 100, "description",
      attributes, date, 123);
  assertEquals("myCipher", meta.getCipher());
  assertEquals(100, meta.getBitLength());
  assertEquals("description", meta.getDescription());
  assertEquals(attributes, meta.getAttributes());
  assertEquals(date, meta.getCreated());
  assertEquals(123, meta.getVersions());

  second = new KeyProvider.Metadata(meta.serialize());
  assertEquals(meta.getCipher(), second.getCipher());
  assertEquals(meta.getBitLength(), second.getBitLength());
  assertEquals(meta.getDescription(), second.getDescription());
  assertEquals(meta.getAttributes(), second.getAttributes());
  assertEquals(meta.getCreated(), second.getCreated());
  assertEquals(meta.getVersions(), second.getVersions());

  newVersion = second.addVersion();
  assertEquals(123, newVersion);
  assertEquals(124, second.getVersions());
  assertEquals(123, meta.getVersions());
}

EqualityVerifier 
/**
 * unnestUri should strip the outer scheme and expose the nested URI as a
 * Path, preserving query and fragment.
 */
@Test
public void testUnnestUri() throws Exception {
  assertEquals(new Path("hdfs://nn.example.com/my/path"),
      ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn.example.com/my/path")));
  assertEquals(new Path("hdfs://nn/my/path?foo=bar&baz=bat#yyy"),
      ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn/my/path?foo=bar&baz=bat#yyy")));
  assertEquals(new Path("inner://hdfs@nn1.example.com/my/path"),
      ProviderUtils.unnestUri(new URI("outer://inner@hdfs@nn1.example.com/my/path")));
  assertEquals(new Path("user:///"),
      ProviderUtils.unnestUri(new URI("outer://user/")));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * createKey and rollNewVersion should both generate material of the
 * requested size and cipher.
 */
@Test
public void testMaterialGeneration() throws Exception {
  MyKeyProvider kp = new MyKeyProvider();
  KeyProvider.Options options = new KeyProvider.Options(new Configuration());
  options.setCipher(CIPHER);
  options.setBitLength(128);
  kp.createKey("hello", options);
  Assert.assertEquals(128, kp.size);
  Assert.assertEquals(CIPHER, kp.algorithm);
  Assert.assertNotNull(kp.material);

  // Rolling on a fresh provider generates material the same way.
  kp = new MyKeyProvider();
  kp.rollNewVersion("hello");
  Assert.assertEquals(128, kp.size);
  Assert.assertEquals(CIPHER, kp.algorithm);
  Assert.assertNotNull(kp.material);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Options should pick up configured defaults, honor setters, and fall
 * back to built-in defaults without configuration overrides.
 */
@Test
public void testOptions() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProvider.DEFAULT_CIPHER_NAME, "myCipher");
  conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 512);
  Map attributes = new HashMap();
  attributes.put("a", "A");

  KeyProvider.Options options = KeyProvider.options(conf);
  assertEquals("myCipher", options.getCipher());
  assertEquals(512, options.getBitLength());

  options.setCipher("yourCipher");
  options.setDescription("description");
  options.setAttributes(attributes);
  options.setBitLength(128);
  assertEquals("yourCipher", options.getCipher());
  assertEquals(128, options.getBitLength());
  assertEquals("description", options.getDescription());
  assertEquals(attributes, options.getAttributes());

  // Without configuration overrides, built-in defaults apply.
  options = KeyProvider.options(new Configuration());
  assertEquals(KeyProvider.DEFAULT_CIPHER, options.getCipher());
  assertEquals(KeyProvider.DEFAULT_BITLENGTH, options.getBitLength());
}

Class: org.apache.hadoop.crypto.key.TestKeyProviderCryptoExtension

BranchVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * generateEncryptedKey should produce an EEK whose material differs from
 * the encryption key, decrypts deterministically, and varies per call.
 */
@Test
public void testGenerateEncryptedKey() throws Exception {
  KeyProviderCryptoExtension.EncryptedKeyVersion ek1 =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  assertEquals("Version name of EEK should be EEK",
      KeyProviderCryptoExtension.EEK,
      ek1.getEncryptedKeyVersion().getVersionName());
  assertEquals("Name of EEK should be encryption key name",
      ENCRYPTION_KEY_NAME, ek1.getEncryptionKeyName());
  assertNotNull("Expected encrypted key material",
      ek1.getEncryptedKeyVersion().getMaterial());
  assertEquals("Length of encryption key material and EEK material should "
      + "be the same", encryptionKey.getMaterial().length,
      ek1.getEncryptedKeyVersion().getMaterial().length);

  KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
  assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
  assertEquals(encryptionKey.getMaterial().length, k1.getMaterial().length);
  if (Arrays.equals(k1.getMaterial(), encryptionKey.getMaterial())) {
    fail("Encrypted key material should not equal encryption key material");
  }
  if (Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
      encryptionKey.getMaterial())) {
    fail("Encrypted key material should not equal decrypted key material");
  }

  // Decrypting the same EEK twice yields the same material.
  KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
  assertArrayEquals(k1.getMaterial(), k1a.getMaterial());

  // A second EEK must differ in both material and IV.
  KeyProviderCryptoExtension.EncryptedKeyVersion ek2 =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
  if (Arrays.equals(k1.getMaterial(), k2.getMaterial())) {
    fail("Generated EEKs should have different material!");
  }
  if (Arrays.equals(ek1.getEncryptedKeyIv(), ek2.getEncryptedKeyIv())) {
    fail("Generated EEKs should have different IVs!");
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Manual AES/CTR decryption of an EEK must yield the same material as
 * decryptEncryptedKey().
 */
@Test
public void testEncryptDecrypt() throws Exception {
  KeyProviderCryptoExtension.EncryptedKeyVersion eek =
      kpExt.generateEncryptedKey(encryptionKey.getName());
  final byte[] encryptedKeyIv = eek.getEncryptedKeyIv();
  final byte[] encryptedKeyMaterial =
      eek.getEncryptedKeyVersion().getMaterial();

  // Decrypt by hand with the derived IV.
  Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
  cipher.init(Cipher.DECRYPT_MODE,
      new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
      new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion
          .deriveIV(encryptedKeyIv)));
  final byte[] manualMaterial = cipher.doFinal(encryptedKeyMaterial);

  // Decrypt via the API.
  EncryptedKeyVersion eek2 = EncryptedKeyVersion.createForDecryption(
      eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
      eek.getEncryptedKeyVersion().getMaterial());
  KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek2);
  final byte[] apiMaterial = decryptedKey.getMaterial();

  assertArrayEquals("Wrong key material from decryptEncryptedKey",
      manualMaterial, apiMaterial);
}

Class: org.apache.hadoop.crypto.key.TestKeyProviderDelegationTokenExtension

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The delegation-token extension should return no tokens for a plain
 * provider and delegate to providers that implement token support.
 */
@Test
public void testCreateExtension() throws Exception {
  Configuration conf = new Configuration();
  Credentials credentials = new Credentials();
  KeyProvider kp =
      new UserProvider.Factory().createProvider(new URI("user:///"), conf);
  KeyProviderDelegationTokenExtension kpDTE1 =
      KeyProviderDelegationTokenExtension
          .createKeyProviderDelegationTokenExtension(kp);
  Assert.assertNotNull(kpDTE1);
  // A plain provider contributes no delegation tokens.
  Assert.assertNull(kpDTE1.addDelegationTokens("user", credentials));

  MockKeyProvider mock = mock(MockKeyProvider.class);
  when(mock.addDelegationTokens("renewer", credentials)).thenReturn(
      new Token[]{new Token(null, null, new Text("kind"), new Text("service"))});
  KeyProviderDelegationTokenExtension kpDTE2 =
      KeyProviderDelegationTokenExtension
          .createKeyProviderDelegationTokenExtension(mock);
  Token[] tokens = kpDTE2.addDelegationTokens("renewer", credentials);
  Assert.assertNotNull(tokens);
  Assert.assertEquals("kind", tokens[0].getKind().toString());
}

Class: org.apache.hadoop.crypto.key.TestKeyProviderFactory

InternalCallVerifier EqualityVerifier 
/**
 * The user provider should store key material in the current user's
 * credentials under versioned names.
 */
@Test
public void testUserProvider() throws Exception {
  Configuration conf = new Configuration();
  final String ourUrl = UserProvider.SCHEME_NAME + ":///";
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
  checkSpecificProvider(conf, ourUrl);
  Credentials credentials =
      UserGroupInformation.getCurrentUser().getCredentials();
  assertArrayEquals(new byte[]{1},
      credentials.getSecretKey(new Text("key4@0")));
  assertArrayEquals(new byte[]{2},
      credentials.getSecretKey(new Text("key4@1")));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A malformed provider URI should be reported as a configuration error
 * with an explanatory message.
 */
@Test
public void testUriErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unkn@own:/x/y");
  try {
    // Return value discarded: the call must throw before producing one.
    KeyProviderFactory.getProviders(conf);
    // fail() replaces the assertTrue("...", false) anti-idiom.
    fail("should throw!");
  } catch (IOException e) {
    assertEquals("Bad configuration of "
        + KeyProviderFactory.KEY_PROVIDER_PATH
        + " at unkn@own:/x/y", e.getMessage());
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An unknown provider scheme should be reported as a missing-factory
 * error with an explanatory message.
 */
@Test
public void testFactoryErrors() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "unknown:///");
  try {
    // Return value discarded: the call must throw before producing one.
    KeyProviderFactory.getProviders(conf);
    // fail() replaces the assertTrue("...", false) anti-idiom.
    fail("should throw!");
  } catch (IOException e) {
    assertEquals("No KeyProviderFactory for unknown:/// in "
        + KeyProviderFactory.KEY_PROVIDER_PATH, e.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Two comma-separated provider URIs must yield two providers, in config
 * order, each reporting its own URI from toString().
 */
@Test
public void testFactory() throws Exception {
  final Configuration conf = new Configuration();
  final String userUri = UserProvider.SCHEME_NAME + ":///";
  final String jksUri =
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks";
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, userUri + "," + jksUri);
  List providers = KeyProviderFactory.getProviders(conf);
  assertEquals(2, providers.size());
  assertEquals(UserProvider.class, providers.get(0).getClass());
  assertEquals(JavaKeyStoreProvider.class, providers.get(1).getClass());
  assertEquals(userUri, providers.get(0).toString());
  assertEquals(jksUri, providers.get(1).toString());
}

Class: org.apache.hadoop.crypto.key.TestKeyShell

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** An unknown provider scheme must make `create` exit 1 with an explanatory message. */
@Test
public void testInvalidProvider() throws Exception {
  final String[] createArgs = {"create", "key1", "-cipher", "AES",
      "-provider", "sdff://file/tmp/keystore.jceks"};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(1, rc);
  assertTrue(outContent.toString()
      .contains("There are no valid KeyProviders configured."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A key size that is not a valid AES length (56) must be rejected with rc=1. */
@Test
public void testInvalidKeySize() throws Exception {
  final String[] createArgs = {"create", "key1", "-size", "56",
      "-provider", jceksProvider};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** When only the transient user:/// provider is configured, key creation must fail. */
@Test
public void testTransientProviderOnlyConfig() throws Exception {
  final String[] createArgs = {"create", "key1"};
  KeyShell shell = new KeyShell();
  Configuration conf = new Configuration();
  conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
  shell.setConf(conf);
  int rc = shell.run(createArgs);
  assertEquals(1, rc);
  assertTrue(outContent.toString()
      .contains("There are no valid KeyProviders configured."));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Walks a key through create -> list -> roll -> delete, checking the shell
 * output and key listings at every step.
 */
@Test
public void testKeySuccessfulKeyLifecycle() throws Exception {
  final String keyName = "key1";
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());

  outContent.reset();
  final String[] createArgs = {"create", keyName, "-provider", jceksProvider};
  int rc = shell.run(createArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString()
      .contains(keyName + " has been successfully created"));

  // Plain listing contains the key; metadata listing adds description/created.
  String listing = listKeys(shell, false);
  assertTrue(listing.contains(keyName));
  listing = listKeys(shell, true);
  assertTrue(listing.contains(keyName));
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("created"));

  outContent.reset();
  final String[] rollArgs = {"roll", keyName, "-provider", jceksProvider};
  rc = shell.run(rollArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString()
      .contains("key1 has been successfully rolled."));

  // After deletion the key must disappear from the listing.
  deleteKey(shell, keyName);
  listing = listKeys(shell, false);
  assertFalse(listing, listing.contains(keyName));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises -attr parsing: a valid attribute, malformed forms (empty key,
 * missing '=', bare '='), a value containing '=', whitespace trimming
 * around keys/values, and rejection of a duplicated attribute key.
 */
@Test
public void testAttributes() throws Exception {
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());

  // A well-formed attribute is stored and shown by the metadata listing.
  final String[] createValid = {"create", "keyattr1", "-provider", jceksProvider,
      "-attr", "foo=bar"};
  int rc = shell.run(createValid);
  assertEquals(0, rc);
  assertTrue(outContent.toString()
      .contains("keyattr1 has been successfully created"));
  String listing = listKeys(shell, true);
  assertTrue(listing.contains("keyattr1"));
  assertTrue(listing.contains("attributes: [foo=bar]"));

  // Malformed attributes are rejected: empty key, no '=', bare '='.
  outContent.reset();
  final String[] createMalformed = {"create", "keyattr2", "-provider", jceksProvider,
      "-attr", "=bar"};
  rc = shell.run(createMalformed);
  assertEquals(1, rc);
  outContent.reset();
  createMalformed[5] = "foo";
  rc = shell.run(createMalformed);
  assertEquals(1, rc);
  outContent.reset();
  createMalformed[5] = "=";
  rc = shell.run(createMalformed);
  assertEquals(1, rc);

  // A value containing '=' is accepted and listed back as a=b=c.
  outContent.reset();
  createMalformed[5] = "a=b=c";
  rc = shell.run(createMalformed);
  assertEquals(0, rc);
  listing = listKeys(shell, true);
  assertTrue(listing.contains("keyattr2"));
  assertTrue(listing.contains("attributes: [a=b=c]"));

  // Whitespace around keys and values is trimmed before storage.
  outContent.reset();
  final String[] createSpaced = {"create", "keyattr3", "-provider", jceksProvider,
      "-attr", "foo = bar", "-attr", " glarch =baz ", "-attr", "abc=def"};
  rc = shell.run(createSpaced);
  assertEquals(0, rc);
  listing = listKeys(shell, true);
  assertTrue(listing.contains("keyattr3"));
  assertTrue(listing.contains("[foo=bar]"));
  assertTrue(listing.contains("[glarch=baz]"));
  assertTrue(listing.contains("[abc=def]"));

  // The same attribute key given twice must fail the create.
  outContent.reset();
  final String[] createDup = {"create", "keyattr4", "-provider", jceksProvider,
      "-attr", "foo=bar", "-attr", "foo=glarch"};
  rc = shell.run(createDup);
  assertEquals(1, rc);

  deleteKey(shell, "keyattr1");
  deleteKey(shell, "keyattr2");
  deleteKey(shell, "keyattr3");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A nonsense cipher name ("LJM") must be rejected with rc=1 and no key created. */
@Test
public void testInvalidCipher() throws Exception {
  final String[] createArgs = {"create", "key1", "-cipher", "LJM",
      "-provider", jceksProvider};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(1, rc);
  assertTrue(outContent.toString().contains("key1 has not been created."));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** -description must be persisted and shown in the metadata listing. */
@Test
public void testKeySuccessfulCreationWithDescription() throws Exception {
  outContent.reset();
  final String[] createArgs = {"create", "key1", "-provider", jceksProvider,
      "-description", "someDescription"};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString()
      .contains("key1 has been successfully created"));
  String listing = listKeys(shell, true);
  assertTrue(listing.contains("description"));
  assertTrue(listing.contains("someDescription"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Creating a key in the transient user:/// provider succeeds but prints a warning. */
@Test
public void testTransientProviderWarning() throws Exception {
  final String[] createArgs = {"create", "key1", "-cipher", "AES",
      "-provider", "user:///"};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString()
      .contains("WARNING: you are modifying a transient provider."));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A full algorithm/mode/padding cipher spec is accepted on create. */
@Test
public void testFullCipher() throws Exception {
  final String keyName = "key1";
  final String[] createArgs = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding",
      "-provider", jceksProvider};
  KeyShell shell = new KeyShell();
  shell.setConf(new Configuration());
  int rc = shell.run(createArgs);
  assertEquals(0, rc);
  assertTrue(outContent.toString()
      .contains(keyName + " has been successfully created"));
  deleteKey(shell, keyName);
}

Class: org.apache.hadoop.crypto.key.TestValueQueue

EqualityVerifier 
/** On first access the queue is filled to numInitValues (10 * 0.1 = 1 value). */
@Test
public void testInitFill() throws Exception {
  MockFiller filler = new MockFiller();
  ValueQueue vq =
      new ValueQueue(10, 0.1f, 300, 1, SyncGenerationPolicy.ALL, filler);
  Assert.assertEquals("test", vq.getNext("k1"));
  Assert.assertEquals(1, filler.getTop().num);
  vq.shutdown();
}

InternalCallVerifier EqualityVerifier 
/** With policy ALL, getAtMost(n) synchronously generates everything requested. */
@Test
public void testgetAtMostPolicyALL() throws Exception {
  MockFiller filler = new MockFiller();
  ValueQueue vq =
      new ValueQueue(10, 0.1f, 300, 1, SyncGenerationPolicy.ALL, filler);
  // Initial fill: low watermark is 10 * 0.1 = 1 value.
  Assert.assertEquals("test", vq.getNext("k1"));
  Assert.assertEquals(1, filler.getTop().num);
  // Requests larger than the cache trigger a synchronous fill of the full count.
  Assert.assertEquals(10, vq.getAtMost("k1", 10).size());
  Assert.assertEquals(10, filler.getTop().num);
  Assert.assertEquals(19, vq.getAtMost("k1", 19).size());
  Assert.assertEquals(19, filler.getTop().num);
  vq.shutdown();
}

InternalCallVerifier EqualityVerifier 
/** initializeQueuesForKeys pre-fills each named key to the low watermark (10 * 0.5 = 5). */
@Test
public void testWarmUp() throws Exception {
  MockFiller filler = new MockFiller();
  ValueQueue vq =
      new ValueQueue(10, 0.5f, 300, 1, SyncGenerationPolicy.ALL, filler);
  vq.initializeQueuesForKeys("k1", "k2", "k3");
  FillInfo[] fills = {filler.getTop(), filler.getTop(), filler.getTop()};
  Assert.assertEquals(5, fills[0].num);
  Assert.assertEquals(5, fills[1].num);
  Assert.assertEquals(5, fills[2].num);
  // Fill order is not guaranteed, so compare the key names as a set.
  Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
      Sets.newHashSet(fills[0].key, fills[1].key, fills[2].key));
  vq.shutdown();
}

InternalCallVerifier EqualityVerifier 
/** * Verify getAtMost when SyncGenerationPolicy = ATLEAST_ONE: getAtMost(10) returns only the 2 values still cached (3 filled initially, 1 consumed by getNext) and triggers a refill of 10. NOTE(review): the original comment incorrectly said "Policy = ALL". */ @Test public void testgetAtMostPolicyATLEAST_ONE() throws Exception { MockFiller filler=new MockFiller(); ValueQueue vq=new ValueQueue(10,0.3f,300,1,SyncGenerationPolicy.ATLEAST_ONE,filler); Assert.assertEquals("test",vq.getNext("k1")); Assert.assertEquals(3,filler.getTop().num); Assert.assertEquals(2,vq.getAtMost("k1",10).size()); Assert.assertEquals(10,filler.getTop().num); vq.shutdown(); }

InternalCallVerifier EqualityVerifier 
/** * Verifies that the refill task is executed after "checkInterval" if the number of cached values drops below "lowWatermark": consuming the single cached value makes the background task refill the queue to capacity (10). NOTE(review): the final getTop() presumably blocks/polls until the background refill has run -- confirm against MockFiller.getTop. */ @Test public void testRefill() throws Exception { MockFiller filler=new MockFiller(); ValueQueue vq=new ValueQueue(10,0.1f,300,1,SyncGenerationPolicy.ALL,filler); Assert.assertEquals("test",vq.getNext("k1")); Assert.assertEquals(1,filler.getTop().num); vq.getNext("k1"); Assert.assertEquals(1,filler.getTop().num); Assert.assertEquals(10,filler.getTop().num); vq.shutdown(); }

InternalCallVerifier EqualityVerifier 
/**
 * With policy LOW_WATERMARK, getAtMost generates only up to the watermark
 * (3) synchronously, and the remainder is filled asynchronously.
 */
@Test
public void testgetAtMostPolicyLOW_WATERMARK() throws Exception {
  MockFiller filler = new MockFiller();
  ValueQueue vq =
      new ValueQueue(10, 0.3f, 300, 1, SyncGenerationPolicy.LOW_WATERMARK, filler);
  Assert.assertEquals("test", vq.getNext("k1"));
  Assert.assertEquals(3, filler.getTop().num);
  Assert.assertEquals(3, vq.getAtMost("k1", 10).size());
  Assert.assertEquals(1, filler.getTop().num);
  Assert.assertEquals(10, filler.getTop().num);
  vq.shutdown();
}

InternalCallVerifier EqualityVerifier 
/** * Verifies that no refill happens after "checkInterval" when the number of cached values stays above "lowWatermark": after the initial fill of 5, no further fill request is observed. NOTE(review): getTop() returning null presumably means no fill request arrived within its poll timeout -- confirm against MockFiller. */ @Test public void testNoRefill() throws Exception { MockFiller filler=new MockFiller(); ValueQueue vq=new ValueQueue(10,0.5f,300,1,SyncGenerationPolicy.ALL,filler); Assert.assertEquals("test",vq.getNext("k1")); Assert.assertEquals(5,filler.getTop().num); Assert.assertEquals(null,filler.getTop()); vq.shutdown(); }

Class: org.apache.hadoop.crypto.key.kms.server.TestKMS

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end exercise of KMSClientProvider against an embedded KMS server:
 * key creation, version/metadata retrieval, rolling, the crypto extension
 * (generate/decrypt EEKs), deletion, description/attribute handling, and
 * delegation tokens. Code left byte-identical: assertions are strictly
 * order-dependent on server-side state built up by earlier calls.
 */
@Test public void testKMSProvider() throws Exception { Configuration conf=new Configuration(); conf.set("hadoop.security.authentication","kerberos"); UserGroupInformation.setConfiguration(conf); File confDir=getTestDir(); conf=createBaseKMSConf(confDir); writeConf(confDir,conf); runServer(null,null,confDir,new KMSCallable(){ @Override public Void call() throws Exception { Date started=new Date(); Configuration conf=new Configuration(); URI uri=createKMSUri(getKMSUrl()); KeyProvider kp=new KMSClientProvider(uri,conf); Assert.assertTrue(kp.getKeys().isEmpty()); Assert.assertEquals(0,kp.getKeysMetadata().length); KeyProvider.Options options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); options.setDescription("l1"); KeyProvider.KeyVersion kv0=kp.createKey("k1",options); Assert.assertNotNull(kv0); Assert.assertNotNull(kv0.getVersionName()); Assert.assertNotNull(kv0.getMaterial()); KeyProvider.KeyVersion kv1=kp.getKeyVersion(kv0.getVersionName()); Assert.assertEquals(kv0.getVersionName(),kv1.getVersionName()); Assert.assertNotNull(kv1.getMaterial()); KeyProvider.KeyVersion cv1=kp.getCurrentKey("k1"); Assert.assertEquals(kv0.getVersionName(),cv1.getVersionName()); Assert.assertNotNull(cv1.getMaterial()); KeyProvider.Metadata m1=kp.getMetadata("k1"); Assert.assertEquals("AES/CTR/NoPadding",m1.getCipher()); Assert.assertEquals("AES",m1.getAlgorithm()); Assert.assertEquals(128,m1.getBitLength()); Assert.assertEquals(1,m1.getVersions()); Assert.assertNotNull(m1.getCreated()); Assert.assertTrue(started.before(m1.getCreated())); List lkv1=kp.getKeyVersions("k1"); Assert.assertEquals(1,lkv1.size()); Assert.assertEquals(kv0.getVersionName(),lkv1.get(0).getVersionName()); Assert.assertNotNull(kv1.getMaterial()); KeyProvider.KeyVersion kv2=kp.rollNewVersion("k1"); Assert.assertNotSame(kv0.getVersionName(),kv2.getVersionName()); Assert.assertNotNull(kv2.getMaterial()); kv2=kp.getKeyVersion(kv2.getVersionName()); boolean eq=true; 
// -- rolling: the rolled version's material must differ from v0; the current key becomes v2 --
for (int i=0; i < kv1.getMaterial().length; i++) { eq=eq && kv1.getMaterial()[i] == kv2.getMaterial()[i]; } Assert.assertFalse(eq); KeyProvider.KeyVersion cv2=kp.getCurrentKey("k1"); Assert.assertEquals(kv2.getVersionName(),cv2.getVersionName()); Assert.assertNotNull(cv2.getMaterial()); eq=true; for (int i=0; i < kv1.getMaterial().length; i++) { eq=eq && cv2.getMaterial()[i] == kv2.getMaterial()[i]; } Assert.assertTrue(eq); List lkv2=kp.getKeyVersions("k1"); Assert.assertEquals(2,lkv2.size()); Assert.assertEquals(kv1.getVersionName(),lkv2.get(0).getVersionName()); Assert.assertNotNull(lkv2.get(0).getMaterial()); Assert.assertEquals(kv2.getVersionName(),lkv2.get(1).getVersionName()); Assert.assertNotNull(lkv2.get(1).getMaterial()); KeyProvider.Metadata m2=kp.getMetadata("k1"); Assert.assertEquals("AES/CTR/NoPadding",m2.getCipher()); Assert.assertEquals("AES",m2.getAlgorithm()); Assert.assertEquals(128,m2.getBitLength()); Assert.assertEquals(2,m2.getVersions()); Assert.assertNotNull(m2.getCreated()); Assert.assertTrue(started.before(m2.getCreated())); List ks1=kp.getKeys(); Assert.assertEquals(1,ks1.size()); Assert.assertEquals("k1",ks1.get(0)); KeyProvider.Metadata[] kms1=kp.getKeysMetadata("k1"); Assert.assertEquals(1,kms1.length); Assert.assertEquals("AES/CTR/NoPadding",kms1[0].getCipher()); Assert.assertEquals("AES",kms1[0].getAlgorithm()); Assert.assertEquals(128,kms1[0].getBitLength()); Assert.assertEquals(2,kms1[0].getVersions()); Assert.assertNotNull(kms1[0].getCreated()); Assert.assertTrue(started.before(kms1[0].getCreated())); KeyProvider.KeyVersion kv=kp.getCurrentKey("k1"); KeyProviderCryptoExtension kpExt=KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp); EncryptedKeyVersion ek1=kpExt.generateEncryptedKey(kv.getName()); Assert.assertEquals(KeyProviderCryptoExtension.EEK,ek1.getEncryptedKeyVersion().getVersionName()); Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial()); 
// -- crypto extension: decrypting the same EEK twice is stable; two generated EEKs decrypt to different material; then the key is deleted and all lookups go null/empty --
Assert.assertEquals(kv.getMaterial().length,ek1.getEncryptedKeyVersion().getMaterial().length); KeyProvider.KeyVersion k1=kpExt.decryptEncryptedKey(ek1); Assert.assertEquals(KeyProviderCryptoExtension.EK,k1.getVersionName()); KeyProvider.KeyVersion k1a=kpExt.decryptEncryptedKey(ek1); Assert.assertArrayEquals(k1.getMaterial(),k1a.getMaterial()); Assert.assertEquals(kv.getMaterial().length,k1.getMaterial().length); EncryptedKeyVersion ek2=kpExt.generateEncryptedKey(kv.getName()); KeyProvider.KeyVersion k2=kpExt.decryptEncryptedKey(ek2); boolean isEq=true; for (int i=0; isEq && i < ek2.getEncryptedKeyVersion().getMaterial().length; i++) { isEq=k2.getMaterial()[i] == k1.getMaterial()[i]; } Assert.assertFalse(isEq); kp.deleteKey("k1"); Assert.assertNull(kp.getKeyVersion("k1")); Assert.assertNull(kp.getKeyVersions("k1")); Assert.assertNull(kp.getMetadata("k1")); Assert.assertTrue(kp.getKeys().isEmpty()); Assert.assertEquals(0,kp.getKeysMetadata().length); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); kp.createKey("k2",options); KeyProvider.Metadata meta=kp.getMetadata("k2"); Assert.assertNull(meta.getDescription()); Assert.assertTrue(meta.getAttributes().isEmpty()); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); options.setDescription("d"); kp.createKey("k3",options); meta=kp.getMetadata("k3"); Assert.assertEquals("d",meta.getDescription()); Assert.assertTrue(meta.getAttributes().isEmpty()); Map attributes=new HashMap(); attributes.put("a","A"); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); options.setAttributes(attributes); kp.createKey("k4",options); meta=kp.getMetadata("k4"); Assert.assertNull(meta.getDescription()); Assert.assertEquals(attributes,meta.getAttributes()); options=new KeyProvider.Options(conf); options.setCipher("AES/CTR/NoPadding"); options.setBitLength(128); 
// -- description + attributes together (k5), then a delegation token is obtained for the KMS address --
options.setDescription("d"); options.setAttributes(attributes); kp.createKey("k5",options); meta=kp.getMetadata("k5"); Assert.assertEquals("d",meta.getDescription()); Assert.assertEquals(attributes,meta.getAttributes()); KeyProviderDelegationTokenExtension kpdte=KeyProviderDelegationTokenExtension.createKeyProviderDelegationTokenExtension(kp); Credentials credentials=new Credentials(); kpdte.addDelegationTokens("foo",credentials); Assert.assertEquals(1,credentials.getAllTokens().size()); InetSocketAddress kmsAddr=new InetSocketAddress(getKMSUrl().getHost(),getKMSUrl().getPort()); Assert.assertEquals(new Text("kms-dt"),credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind()); return null; } } ); }

Class: org.apache.hadoop.examples.TestWordStats

InternalCallVerifier EqualityVerifier 
/** The stddev computed by the MR job must match an independent local read of the input. */
@Test
public void testGetTheStandardDeviation() throws Exception {
  final String[] args = {INPUT, STDDEV_OUTPUT};
  WordStandardDeviation job = new WordStandardDeviation();
  ToolRunner.run(new Configuration(), job, args);
  double stddev = job.getStandardDeviation();
  WordStdDevReader reader = new WordStdDevReader();
  assertEquals(stddev, reader.read(INPUT), 0.0);
}

InternalCallVerifier EqualityVerifier 
/** The mean computed by the MR job must match an independent local read of the input. */
@Test
public void testGetTheMean() throws Exception {
  final String[] args = {INPUT, MEAN_OUTPUT};
  WordMean job = new WordMean();
  ToolRunner.run(new Configuration(), job, args);
  double mean = job.getMean();
  WordMeanReader reader = new WordMeanReader();
  assertEquals(mean, reader.read(INPUT), 0.0);
}

InternalCallVerifier EqualityVerifier 
/** The median computed by the MR job must match an independent local read of the input. */
@Test
public void testGetTheMedian() throws Exception {
  final String[] args = {INPUT, MEDIAN_OUTPUT};
  WordMedian job = new WordMedian();
  ToolRunner.run(new Configuration(), job, args);
  double median = job.getMedian();
  WordMedianReader reader = new WordMedianReader();
  assertEquals(median, reader.read(INPUT), 0.0);
}

Class: org.apache.hadoop.fs.FCStatisticsBaseTest

InternalCallVerifier EqualityVerifier 
/**
 * Statistics counters: basic increments, visibility of an update made from
 * another (joined) thread, copy-constructor snapshot semantics, and reset().
 */
@Test(timeout = 60000)
public void testStatisticsOperations() throws Exception {
  final Statistics stats = new Statistics("file");
  Assert.assertEquals(0L, stats.getBytesRead());
  Assert.assertEquals(0L, stats.getBytesWritten());
  Assert.assertEquals(0, stats.getWriteOps());

  stats.incrementBytesWritten(1000);
  Assert.assertEquals(1000L, stats.getBytesWritten());
  Assert.assertEquals(0, stats.getWriteOps());
  stats.incrementWriteOps(123);
  Assert.assertEquals(123, stats.getWriteOps());

  // An increment from a second thread must be visible after join.
  Thread worker = new Thread() {
    @Override
    public void run() {
      stats.incrementWriteOps(1);
    }
  };
  worker.start();
  Uninterruptibles.joinUninterruptibly(worker);
  Assert.assertEquals(124, stats.getWriteOps());

  // A copy keeps the pre-reset values; reset() zeroes only the original.
  Statistics snapshot = new Statistics(stats);
  stats.reset();
  Assert.assertEquals(0, stats.getWriteOps());
  Assert.assertEquals(0L, stats.getBytesWritten());
  Assert.assertEquals(0L, stats.getBytesRead());
  Assert.assertEquals(124, snapshot.getWriteOps());
  Assert.assertEquals(1000L, snapshot.getBytesWritten());
  Assert.assertEquals(0L, snapshot.getBytesRead());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Per-scheme byte counters: writes must not bump the read counter, both
 * regular and positional reads count, and the exact-URI statistics map
 * entry agrees with the per-scheme object.
 */
@Test
public void testStatistics() throws IOException, URISyntaxException {
  URI fsUri = getFsUri();
  Statistics stats = FileContext.getStatistics(fsUri);
  Assert.assertEquals(0, stats.getBytesRead());

  Path filePath = fileContextTestHelper.getTestRootPath(fc, "file1");
  createFile(fc, filePath, numBlocks, blockSize);
  // Writing alone must leave the read counter untouched.
  Assert.assertEquals(0, stats.getBytesRead());
  verifyWrittenBytes(stats);

  // NOTE(review): this stream is never closed, matching the original test.
  FSDataInputStream in = fc.open(filePath);
  byte[] buf = new byte[blockSize];
  int bytesRead = in.read(buf, 0, blockSize);
  in.read(0, buf, 0, blockSize); // positional read must count as well
  Assert.assertEquals(blockSize, bytesRead);
  verifyReadBytes(stats);
  verifyWrittenBytes(stats);
  verifyReadBytes(FileContext.getStatistics(getFsUri()));

  Map statsMap = FileContext.getAllStatistics();
  URI exactUri = getSchemeAuthorityUri();
  verifyWrittenBytes(statsMap.get(exactUri));
  fc.delete(filePath, true);
}

Class: org.apache.hadoop.fs.FSMainOperationsBaseTest

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** The X filter matches nothing directly under "test", so the listing is empty. */
@Test
public void testListStatusFilterWithNoMatches() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA2),
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] filtered =
      fSys.listStatus(getTestRootPath(fSys, "test"), TEST_X_FILTER);
  Assert.assertEquals(0, filtered.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Globbing "test/hadoop/a??" with the X filter must yield only the two ax* dirs. */
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter()
    throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "test/hadoop*" must match exactly the two hadoop* parent directories. */
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*"));
  Assert.assertEquals(2, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop"), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop2"), matched));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** create() on an existing file must fail; create(overwrite=true) must succeed. */
@Test
public void testOverwrite() throws IOException {
  final Path path = getTestRootPath(fSys, "test/hadoop/file");
  fSys.mkdirs(path.getParent());
  createFile(path);
  Assert.assertTrue("Exists", exists(fSys, path));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());

  // Re-creating without the overwrite flag must be rejected.
  try {
    createFile(path);
    Assert.fail("Should throw IOException.");
  } catch (IOException e) {
    // expected
  }

  // Overwrite succeeds and leaves the file at the same length.
  FSDataOutputStream out = fSys.create(path, true, 4096);
  out.write(data, 0, data.length);
  out.close();
  Assert.assertTrue("Exists", exists(fSys, path));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * setWorkingDirectory semantics: ".", "..", relative paths resolved against
 * the current working dir, absolute paths taken verbatim, and relative
 * open/mkdirs resolving against the working dir.
 */
@Test
public void testWorkingDirectory() throws Exception {
  Path workDir = new Path(getAbsoluteTestRootPath(fSys), new Path("test"));
  fSys.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir, fSys.getWorkingDirectory());

  // "." keeps the working dir; ".." moves to its parent.
  fSys.setWorkingDirectory(new Path("."));
  Assert.assertEquals(workDir, fSys.getWorkingDirectory());
  fSys.setWorkingDirectory(new Path(".."));
  Assert.assertEquals(workDir.getParent(), fSys.getWorkingDirectory());

  workDir = new Path(getAbsoluteTestRootPath(fSys), new Path("test"));
  fSys.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir, fSys.getWorkingDirectory());

  // A relative path is resolved against the current working dir.
  Path relativeDir = new Path("existingDir1");
  Path absoluteDir = new Path(workDir, "existingDir1");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(relativeDir);
  Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());

  // An absolute path is used as-is.
  absoluteDir = getTestRootPath(fSys, "test/existingDir2");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());

  // Relative open/mkdirs must also resolve against the working dir.
  Path absolutePath = new Path(absoluteDir, "foo");
  createFile(fSys, absolutePath);
  fSys.open(new Path("foo")).close();
  fSys.mkdirs(new Path("newDir"));
  Assert.assertTrue(isDir(fSys, new Path(absoluteDir, "newDir")));
}

InternalCallVerifier EqualityVerifier 
/** A fully-qualified URI path can be set as the working directory. */
@Test
public void testWDAbsolute() throws IOException {
  final Path absoluteDir = new Path(fSys.getUri() + "/test/existingDir");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir, fSys.getWorkingDirectory());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** "test/hadoop/?" needs single-character names; none exist, so the result is empty. */
@Test
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] filtered =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"), DEFAULT_FILTER);
  Assert.assertEquals(0, filtered.length);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A glob with no matches stays empty even when combined with the X filter. */
@Test
public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter()
    throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] filtered =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"), TEST_X_FILTER);
  Assert.assertEquals(0, filtered.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "test/hadoop/a??" with the accept-all filter must yield all three a?? dirs. */
@Test
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter()
    throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matched));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** A non-existent literal path globs to null; wildcard patterns under it glob to empty. */
@Test
public void testGlobStatusNonExistentFile() throws Exception {
  FileStatus[] statuses = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf"));
  Assert.assertNull(statuses);
  statuses = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/?"));
  Assert.assertEquals(0, statuses.length);
  statuses = fSys.globStatus(getTestRootPath(fSys, "test/hadoopfsdf/xyz*/?"));
  Assert.assertEquals(0, statuses.length);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** "test/hadoop/?" matches no single-character entries, so the result is empty. */
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/?"));
  Assert.assertEquals(0, matched.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Creating a file whose parent does not exist must implicitly create the parents. */
@Test
public void testWriteInNonExistentDirectory() throws IOException {
  final Path path = getTestRootPath(fSys, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fSys, path.getParent()));
  createFile(path);
  Assert.assertTrue("Exists", exists(fSys, path));
  Assert.assertEquals("Length", data.length, fSys.getFileStatus(path).getLen());
  Assert.assertTrue("Parent exists", exists(fSys, path.getParent()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "test/hadoop/*" with the X filter must keep only the two ax* directories. */
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter()
    throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * listStatus over a small hierarchy: the parent listing collapses to one
 * entry, the children enumerate fully, and a leaf directory lists empty.
 */
@Test
public void testListStatus() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, "test/hadoop/a"),
      getTestRootPath(fSys, "test/hadoop/b"),
      getTestRootPath(fSys, "test/hadoop/c/1")};
  Assert.assertFalse(exists(fSys, dirs[0]));
  for (Path dir : dirs) {
    fSys.mkdirs(dir);
  }
  FileStatus[] statuses = fSys.listStatus(getTestRootPath(fSys, "test"));
  Assert.assertEquals(1, statuses.length);
  Assert.assertEquals(getTestRootPath(fSys, "test/hadoop"), statuses[0].getPath());

  statuses = fSys.listStatus(getTestRootPath(fSys, "test/hadoop"));
  Assert.assertEquals(3, statuses.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/a"), statuses));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/b"), statuses));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, "test/hadoop/c"), statuses));

  statuses = fSys.listStatus(getTestRootPath(fSys, "test/hadoop/a"));
  Assert.assertEquals(0, statuses.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "test/hadoop/*" with the accept-all filter must yield all three distinct dirs. */
@Test
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter()
    throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AXX)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched =
      fSys.globStatus(getTestRootPath(fSys, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** "test/hadoop/ax?" must match exactly the two ax* directories. */
@Test
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  final Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched = fSys.globStatus(getTestRootPath(fSys, "test/hadoop/ax?"));
  Assert.assertEquals(2, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Two wildcards ("hadoop*" + "*") expand to all four created directories. */
@Test
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
  Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] matched = fSys.globStatus(getTestRootPath(fSys, "test/hadoop*/*"));
  Assert.assertEquals(4, matched.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), matched));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AAA2), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** listStatus with the X-filter keeps only the two names containing 'x'. */
@Test
public void testListStatusFilterWithSomeMatches() throws Exception {
  Path[] dirs = {
      getTestRootPath(fSys, TEST_DIR_AAA),
      getTestRootPath(fSys, TEST_DIR_AXA),
      getTestRootPath(fSys, TEST_DIR_AXX),
      getTestRootPath(fSys, TEST_DIR_AAA2)};
  if (!exists(fSys, dirs[0])) {
    for (Path dir : dirs) {
      fSys.mkdirs(dir);
    }
  }
  FileStatus[] kept =
      fSys.listStatus(getTestRootPath(fSys, "test/hadoop"), TEST_X_FILTER);
  Assert.assertEquals(2, kept.length);
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXA), kept));
  Assert.assertTrue(containsTestRootPath(getTestRootPath(fSys, TEST_DIR_AXX), kept));
}

Class: org.apache.hadoop.fs.FileContextMainOperationsBaseTest

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Glob "test/hadoop*" matches the two parent dirs, not their children. */
@Test
public void testGlobStatusSomeMatchesInDirectories() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*"));
  Assert.assertEquals(2, matched.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop"), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop2"), matched));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A single-char glob matches none of the three-char names: empty result. */
@Test
public void testGlobStatusFilterWithEmptyPathResults() throws Exception {
  // AXX is listed twice; mkdir of an existing dir is harmless here.
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"), DEFAULT_FILTER);
  Assert.assertEquals(0, matched.length);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Listing "test" with the X-filter finds nothing: x-dirs live one level down. */
@Test
public void testListStatusFilterWithNoMatches() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA2),
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] kept = fc.util().listStatus(getTestRootPath(fc, "test"), TEST_X_FILTER);
  Assert.assertEquals(0, kept.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Glob "*" plus the X-filter keeps only the two names containing 'x'. */
@Test
public void testGlobStatusFilterWithMultiplePathMatchesAndNonTrivialFilter() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] kept =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), TEST_X_FILTER);
  Assert.assertEquals(2, kept.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), kept));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), kept));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A file opened with an explicit buffer size reads back exactly what was written. */
@Test
public void testOpen2() throws IOException {
  final Path root = getTestRootPath(fc, "test");
  final Path zoo = new Path(root, "zoo");
  createFile(zoo);
  final long fileLen = fc.getFileStatus(zoo).getLen();
  FSDataInputStream in = fc.open(zoo, 2048);
  try {
    byte[] buf = new byte[(int) fileLen];
    in.readFully(buf);
    assertArrayEquals(data, buf);
  } finally {
    in.close(); // close even if the assertion throws
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Two wildcards ("hadoop*" + "*") expand to all four created directories. */
@Test
public void testGlobStatusWithMultipleWildCardMatches() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoop*/*"));
  Assert.assertEquals(4, matched.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA2), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Glob "a??" plus the X-filter keeps only axa and axx. */
@Test
public void testGlobStatusFilterWithMultiplePathWildcardsAndNonTrivialFilter() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] kept =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), TEST_X_FILTER);
  Assert.assertEquals(2, kept.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), kept));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), kept));
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * On a file system without symlink support, createSymlink and getLinkTarget
 * must fail with IOException, and getFileLinkStatus must degrade to plain
 * getFileStatus. The whole body is guarded so the test is a no-op on file
 * systems that do support symlinks.
 */
@Test public void testUnsupportedSymlink() throws IOException {
  Path file=getTestRootPath(fc,"file");
  Path link=getTestRootPath(fc,"linkToFile");
  if (!fc.getDefaultFileSystem().supportsSymlinks()) {
    try {
      fc.createSymlink(file,link,false);
      Assert.fail("Created a symlink on a file system that " + "does not support symlinks.");
    } catch ( IOException e) {
      // expected: symlink creation must be rejected
    }
    createFile(file);
    try {
      fc.getLinkTarget(file);
      Assert.fail("Got a link target on a file system that " + "does not support symlinks.");
    } catch ( IOException e) {
      // expected: no link target exists without symlink support
    }
    // Without symlinks, link status and plain status must be identical.
    Assert.assertEquals(fc.getFileStatus(file),fc.getFileLinkStatus(file));
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * A FileContext obtained from an AbstractFileSystem must resolve paths the
 * same way as the original context: a file created through it resolves back
 * to its own URI path.
 *
 * Fixes: stream now closed via try/finally so a failure in create-side
 * checks cannot leak it, and assertEquals arguments put in the conventional
 * (expected, actual) order.
 */
@Test
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  FileContext fc2 = FileContext.getFileContext(asf);
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out =
      fc2.create(path, EnumSet.of(CREATE), Options.CreateOpts.createParent());
  try {
    // No content needed; the file only has to exist for resolvePath.
  } finally {
    out.close();
  }
  Path pathResolved = fc2.resolvePath(path);
  assertEquals(path.toUri().getPath(), pathResolved.toUri().getPath());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Glob "test/hadoop/*" with the accept-all filter matches all three dirs. */
@Test
public void testGlobStatusFilterWithSomePathMatchesAndTrivialFilter() throws Exception {
  // AXX appears twice; re-creating an existing dir is harmless.
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/*"), DEFAULT_FILTER);
  Assert.assertEquals(3, matched.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matched));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Creating a file whose parent is missing implicitly creates the parent. */
@Test
public void testWriteInNonExistentDirectory() throws IOException {
  Path file = getTestRootPath(fc, "test/hadoop/file");
  Assert.assertFalse("Parent doesn't exist", exists(fc, file.getParent()));
  createFile(file);
  Assert.assertTrue("Exists", exists(fc, file));
  Assert.assertEquals("Length", data.length, fc.getFileStatus(file).getLen());
  Assert.assertTrue("Parent exists", exists(fc, file.getParent()));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Globbing a non-existent literal path yields null; globbing with wildcards
 * under a non-existent directory yields an empty array.
 */
@Test
public void testGlobStatusNonExistentFile() throws Exception {
  FileStatus[] matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoopfsdf"));
  Assert.assertNull(matched);
  matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoopfsdf/?"));
  Assert.assertEquals(0, matched.length);
  matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoopfsdf/xyz*/?"));
  Assert.assertEquals(0, matched.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Glob "a??" with the accept-all filter matches the three a-prefixed dirs. */
@Test
public void testGlobStatusFilterWithMultipleWildCardMatchesAndTrivialFilter() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/a??"), DEFAULT_FILTER);
  Assert.assertEquals(3, matched.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AAA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matched));
}

EqualityVerifier PublicFieldVerifier 
/**
 * Exercises FileContext statistics collection around a create/read cycle,
 * then prints the accumulated statistics.
 *
 * Fix: use readFully instead of a single read(byte[]) — read may legally
 * return fewer than bb.length bytes, which would make the content assertion
 * flaky; readFully fills the whole buffer or throws.
 */
@Test
public void testFileContextStatistics() throws IOException {
  FileContext.clearStatistics();
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  createFile(path);
  byte[] bb = new byte[data.length];
  FSDataInputStream fsdis = fc.open(path);
  try {
    fsdis.readFully(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
  FileContext.printStatistics();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writing with checksum verification enabled must not corrupt the data:
 * length and contents read back unchanged.
 *
 * Fix: use readFully instead of a single read(byte[]) — read may legally
 * return fewer than len bytes, making the content assertion flaky.
 */
@Test
public void testSetVerifyChecksum() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out =
      fc.create(path, EnumSet.of(CREATE), Options.CreateOpts.createParent());
  try {
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  } finally {
    out.close();
  }
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  assertTrue(len == data.length);
  byte[] bb = new byte[(int) len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    fsdis.readFully(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** Single-char glob matches nothing, so the X-filter sees an empty set. */
@Test
public void testGlobStatusFilterWithNoMatchingPathsAndNonTrivialFilter() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AXX)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] kept =
      fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"), TEST_X_FILTER);
  Assert.assertEquals(0, kept.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Glob "ax?" matches exactly the two names starting with "ax". */
@Test
public void testGlobStatusWithMultipleMatchesOfSingleChar() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/ax?"));
  Assert.assertEquals(2, matched.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXA), matched));
  Assert.assertTrue(containsPath(getTestRootPath(fc, TEST_DIR_AXX), matched));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the working-directory state machine: absolute and relative cd,
 * "." and ".." handling, resolution of relative file operations against the
 * CWD, and rejection of cd into a missing path or a regular file.
 * Order matters throughout — each step's expectation depends on the CWD set
 * by the previous one.
 */
@Test public void testWorkingDirectory() throws Exception {
  // cd to an absolute path and verify it is reported back.
  Path workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
  fc.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir,fc.getWorkingDirectory());
  // "." is a no-op.
  fc.setWorkingDirectory(new Path("."));
  Assert.assertEquals(workDir,fc.getWorkingDirectory());
  // ".." moves to the parent.
  fc.setWorkingDirectory(new Path(".."));
  Assert.assertEquals(workDir.getParent(),fc.getWorkingDirectory());
  // Reset to the test dir.
  workDir=new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc),new Path("test"));
  fc.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir,fc.getWorkingDirectory());
  // A relative cd resolves against the current working directory.
  Path relativeDir=new Path("existingDir1");
  Path absoluteDir=new Path(workDir,"existingDir1");
  fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
  fc.setWorkingDirectory(relativeDir);
  Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
  // An absolute cd is taken verbatim.
  absoluteDir=getTestRootPath(fc,"test/existingDir2");
  fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
  fc.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
  // Relative file operations (open, mkdir) resolve against the CWD.
  Path absolutePath=new Path(absoluteDir,"foo");
  fc.create(absolutePath,EnumSet.of(CREATE)).close();
  fc.open(new Path("foo")).close();
  fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
  Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
  // cd into a missing path must fail.
  absoluteDir=getTestRootPath(fc,"nonexistingPath");
  try {
    fc.setWorkingDirectory(absoluteDir);
    Assert.fail("cd to non existing dir should have failed");
  } catch ( Exception e) {
    // expected
  }
  // cd into a directory on the local FS root also works.
  absoluteDir=new Path(localFsRootPath,"existingDir");
  fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
  fc.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
  // cd into a regular file must fail with IOException.
  Path aRegularFile=new Path("aRegularFile");
  createFile(aRegularFile);
  try {
    fc.setWorkingDirectory(aRegularFile);
    fail("An IOException expected.");
  } catch ( IOException ioe) {
    // expected
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A single-char glob cannot match any of the three-char names. */
@Test
public void testGlobStatusWithNoMatchesInPath() throws Exception {
  Path[] dirs = {
      getTestRootPath(fc, TEST_DIR_AAA),
      getTestRootPath(fc, TEST_DIR_AXA),
      getTestRootPath(fc, TEST_DIR_AXX),
      getTestRootPath(fc, TEST_DIR_AAA2)};
  if (!exists(fc, dirs[0])) {
    for (Path dir : dirs) {
      fc.mkdir(dir, FsPermission.getDefault(), true);
    }
  }
  FileStatus[] matched = fc.util().globStatus(getTestRootPath(fc, "test/hadoop/?"));
  Assert.assertEquals(0, matched.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The array-returning util().listStatus and the iterator-returning
 * fc.listStatus must report the same directory contents.
 *
 * Fix: parameterize RemoteIterator as RemoteIterator&lt;FileStatus&gt; — with the
 * raw type, next() returns Object, so next().getPath() and the assignment
 * into FileStatus[] would not compile.
 */
@Test
public void testListStatus() throws Exception {
  Path[] testDirs = {
      getTestRootPath(fc, "test/hadoop/a"),
      getTestRootPath(fc, "test/hadoop/b"),
      getTestRootPath(fc, "test/hadoop/c/1")};
  Assert.assertFalse(exists(fc, testDirs[0]));
  for (Path path : testDirs) {
    fc.mkdir(path, FsPermission.getDefault(), true);
  }
  // Array API: "test" contains only "hadoop".
  FileStatus[] paths = fc.util().listStatus(getTestRootPath(fc, "test"));
  Assert.assertEquals(1, paths.length);
  Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), paths[0].getPath());
  paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop"));
  Assert.assertEquals(3, paths.length);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"), paths));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"), paths));
  paths = fc.util().listStatus(getTestRootPath(fc, "test/hadoop/a"));
  Assert.assertEquals(0, paths.length);
  // Iterator API must agree with the array API.
  RemoteIterator<FileStatus> pathsIterator = fc.listStatus(getTestRootPath(fc, "test"));
  Assert.assertEquals(getTestRootPath(fc, "test/hadoop"), pathsIterator.next().getPath());
  Assert.assertFalse(pathsIterator.hasNext());
  pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop"));
  FileStatus[] subdirs = new FileStatus[3];
  int i = 0;
  while (i < 3 && pathsIterator.hasNext()) {
    subdirs[i++] = pathsIterator.next();
  }
  Assert.assertFalse(pathsIterator.hasNext());
  Assert.assertTrue(i == 3);
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/a"), subdirs));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/b"), subdirs));
  Assert.assertTrue(containsPath(getTestRootPath(fc, "test/hadoop/c"), subdirs));
  pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a"));
  Assert.assertFalse(pathsIterator.hasNext());
}

Class: org.apache.hadoop.fs.FileContextPermissionBase

EqualityVerifier 
/**
 * A FileContext obtained inside doAs of a different (remote) user must carry
 * that user's UGI, not the test runner's.
 *
 * Fix: parameterize PrivilegedExceptionAction&lt;FileContext&gt; — with the raw
 * type, doAs returns Object and the assignment to FileContext would not
 * compile.
 */
@Test
public void testUgi() throws IOException, InterruptedException {
  UserGroupInformation otherUser = UserGroupInformation.createRemoteUser("otherUser");
  FileContext newFc = otherUser.doAs(new PrivilegedExceptionAction<FileContext>() {
    @Override
    public FileContext run() throws Exception {
      return FileContext.getFileContext();
    }
  });
  assertEquals("otherUser", newFc.getUgi().getUserName());
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * setOwner with a null user must change only the group; a second group is
 * tried when available; passing null for both user and group must be
 * rejected. Skipped on Windows and when group lookup fails.
 *
 * Fix: declare groups as List&lt;String&gt; — with the raw List, groups.get(0)
 * returns Object and the assignment to String would not compile.
 */
@Test
public void testSetOwner() throws IOException {
  if (Path.WINDOWS) {
    System.out.println("Cannot run test for Windows");
    return;
  }
  String filename = "bar";
  Path f = fileContextTestHelper.getTestRootPath(fc, filename);
  createFile(fc, f);
  List<String> groups = null;
  try {
    groups = getGroups();
    System.out.println(filename + ": " + fc.getFileStatus(f).getPermission());
  } catch (IOException e) {
    System.out.println(StringUtils.stringifyException(e));
    System.out.println("Cannot run test");
    return;
  }
  if (groups == null || groups.size() < 1) {
    System.out.println("Cannot run test: need at least one group. groups=" + groups);
    return;
  }
  try {
    // Null user: only the group changes.
    String g0 = groups.get(0);
    fc.setOwner(f, null, g0);
    Assert.assertEquals(g0, fc.getFileStatus(f).getGroup());
    if (groups.size() > 1) {
      String g1 = groups.get(1);
      fc.setOwner(f, null, g1);
      Assert.assertEquals(g1, fc.getFileStatus(f).getGroup());
    } else {
      System.out.println("Not testing changing the group since user " +
          "belongs to only one group.");
    }
    // Both user and group null is invalid.
    try {
      fc.setOwner(f, null, null);
      fail("Exception expected.");
    } catch (IllegalArgumentException iae) {
      // expected
    }
  } finally {
    cleanupFile(fc, f);
  }
}

Class: org.apache.hadoop.fs.FileContextURIBase

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** The same file's modification time must agree across two FileContexts. */
@Test
public void testModificationTime() throws IOException {
  String testFile = "file1";
  Path testPath = qualifiedPath(testFile, fc2);
  createFile(fc1, testPath);
  long mtimeViaFc1 = fc1.getFileStatus(testPath).getModificationTime();
  long mtimeViaFc2 = fc2.getFileStatus(testPath).getModificationTime();
  Assert.assertEquals(mtimeViaFc1, mtimeViaFc2);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creating a file through fc1 under a missing directory creates the parent,
 * and both are visible through fc2.
 */
@Test
public void testCreateFileInNonExistingDirectory() throws IOException {
  String fileName = "testDir/testFile";
  Path testPath = qualifiedPath(fileName, fc2);
  Assert.assertFalse(exists(fc2, testPath));
  createFile(fc1, testPath);
  Assert.assertTrue(isDir(fc2, testPath.getParent()));
  Assert.assertEquals("testDir", testPath.getParent().getName());
  Assert.assertTrue(exists(fc2, testPath));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Directories created through fc1 (including names with special characters,
 * skipped where the platform cannot create them) must show up identically in
 * both the array and the iterator listStatus APIs.
 *
 * Fix: parameterize ArrayList&lt;Path&gt; and RemoteIterator&lt;FileStatus&gt; — with
 * raw types, testDirs.get(0) / the for-each / pathsItor.next() yield Object
 * and the Path/FileStatus use sites would not compile.
 */
@Test
public void testListStatus() throws Exception {
  final String hPrefix = "test/hadoop";
  final String[] dirs = {hPrefix + "/a", hPrefix + "/b", hPrefix + "/c",
      hPrefix + "/1", hPrefix + "/#@#@", hPrefix + "/&*#$#$@234"};
  ArrayList<Path> testDirs = new ArrayList<Path>();
  for (String d : dirs) {
    if (!isTestableFileNameOnPlatform(d)) {
      continue; // some special characters are illegal on e.g. Windows
    }
    testDirs.add(qualifiedPath(d, fc2));
  }
  Assert.assertFalse(exists(fc1, testDirs.get(0)));
  for (Path path : testDirs) {
    fc1.mkdir(path, FsPermission.getDefault(), true);
  }
  // Array API: "test" contains only the hadoop prefix directory.
  FileStatus[] paths = fc1.util().listStatus(qualifiedPath("test", fc1));
  Assert.assertEquals(1, paths.length);
  Assert.assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());
  // Every created directory appears in the listing.
  paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
  Assert.assertEquals(testDirs.size(), paths.length);
  for (int i = 0; i < testDirs.size(); i++) {
    boolean found = false;
    for (int j = 0; j < paths.length; j++) {
      if (qualifiedPath(testDirs.get(i).toString(), fc1).equals(paths[j].getPath())) {
        found = true;
      }
    }
    Assert.assertTrue(testDirs.get(i) + " not found", found);
  }
  paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
  Assert.assertEquals(0, paths.length);
  // Iterator API must agree with the array API.
  RemoteIterator<FileStatus> pathsItor = fc1.listStatus(qualifiedPath("test", fc1));
  Assert.assertEquals(qualifiedPath(hPrefix, fc1), pathsItor.next().getPath());
  Assert.assertFalse(pathsItor.hasNext());
  pathsItor = fc1.listStatus(qualifiedPath(hPrefix, fc1));
  int dirLen = 0;
  for (; pathsItor.hasNext(); dirLen++) {
    boolean found = false;
    FileStatus stat = pathsItor.next();
    for (int j = 0; j < dirs.length; j++) {
      if (qualifiedPath(dirs[j], fc1).equals(stat.getPath())) {
        found = true;
        break;
      }
    }
    Assert.assertTrue(stat.getPath() + " not found", found);
  }
  Assert.assertEquals(testDirs.size(), dirLen);
  pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
  Assert.assertFalse(pathsItor.hasNext());
}

Class: org.apache.hadoop.fs.SymlinkBaseTest

InternalCallVerifier EqualityVerifier 
/** A symlink whose target starts with "../" resolves relative to the link's dir. */
@Test(timeout=10000)
public void testCreateLinkToDotDotPrefix() throws IOException {
  Path target = new Path(testBaseDir1(), "file");
  Path subDir = new Path(testBaseDir1(), "test");
  Path link = new Path(testBaseDir1(), "test/link");
  createAndWriteFile(target);
  wrapper.mkdir(subDir, FsPermission.getDefault(), false);
  wrapper.setWorkingDirectory(subDir);
  wrapper.createSymlink(new Path("../file"), link, false);
  readFile(link);
  // The stored target stays relative.
  assertEquals(new Path("../file"), wrapper.getLinkTarget(link));
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a symlink onto the directory it points to must fail with
 * FileAlreadyExistsException — both without and with Rename.OVERWRITE —
 * and must leave the directory, the link, and the link target untouched.
 * Skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testRenameSymlinkToDirItLinksTo() throws IOException {
  if ("file".equals(getScheme())) {
    return;
  }
  Path dir=new Path(testBaseDir1(),"dir");
  Path link=new Path(testBaseDir1(),"linkToDir");
  wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false);
  wrapper.createSymlink(dir,link,false);
  // Plain rename onto the target must be rejected.
  try {
    wrapper.rename(link,dir);
    fail("Renamed symlink to its target");
  } catch ( IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  // Nothing changed: dir still a dir, link still a symlink to dir.
  assertTrue(wrapper.isDir(dir));
  assertTrue(wrapper.exists(link));
  assertTrue(wrapper.isSymlink(link));
  assertEquals(dir,wrapper.getLinkTarget(link));
  // Even OVERWRITE may not replace a directory with the link.
  try {
    wrapper.rename(link,dir,Rename.OVERWRITE);
    fail("Renamed symlink to its target");
  } catch ( IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  assertTrue(wrapper.isDir(dir));
  assertTrue(wrapper.exists(link));
  assertTrue(wrapper.isSymlink(link));
  assertEquals(dir,wrapper.getLinkTarget(link));
}

InternalCallVerifier EqualityVerifier 
/** cd into a symlink keeps the link name as the CWD (no resolution). */
@Test(timeout=10000)
public void testSetWDNotResolvesLinks() throws IOException {
  Path baseDir = new Path(testBaseDir1());
  Path linkToBase = new Path(testBaseDir1() + "/link");
  wrapper.createSymlink(baseDir, linkToBase, false);
  wrapper.setWorkingDirectory(linkToBase);
  assertEquals(linkToBase.getName(), wrapper.getWorkingDirectory().getName());
}

BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * setTimes applied to a symlink must follow the link: the target file gets
 * the new mtime (2) and atime (3), while the link's own access time is
 * unchanged. The checks are skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testSetTimes() throws IOException {
  Path file=new Path(testBaseDir1(),"file");
  Path link=new Path(testBaseDir1(),"linkToFile");
  createAndWriteFile(file);
  wrapper.createSymlink(file,link,false);
  // Remember the link's own access time before touching it.
  long at=wrapper.getFileLinkStatus(link).getAccessTime();
  wrapper.setTimes(link,2L,3L);
  if (!"file".equals(getScheme())) {
    // The link itself keeps its access time...
    assertEquals(at,wrapper.getFileLinkStatus(link).getAccessTime());
    // ...while the target picks up atime=3 and mtime=2.
    assertEquals(3,wrapper.getFileStatus(file).getAccessTime());
    assertEquals(2,wrapper.getFileStatus(file).getModificationTime());
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A symlink created with fully-qualified link and target paths stores the
 * qualified target. After renaming the directory containing the target, the
 * link (now reachable via the new directory) still reports the original
 * qualified target but dangles, so reading it must fail.
 */
@Test(timeout=10000) public void testCreateLinkUsingFullyQualPaths() throws IOException {
  Path fileAbs=new Path(testBaseDir1(),"file");
  Path linkAbs=new Path(testBaseDir1(),"linkToFile");
  Path fileQual=new Path(testURI().toString(),fileAbs);
  Path linkQual=new Path(testURI().toString(),linkAbs);
  createAndWriteFile(fileAbs);
  wrapper.createSymlink(fileQual,linkQual,false);
  // Local FS stores the absolute form; others keep it qualified.
  checkLink(linkAbs,"file".equals(getScheme()) ? fileAbs : fileQual,fileQual);
  // Move the whole directory out from under the link target.
  Path dir1=new Path(testBaseDir1());
  Path dir2=new Path(testBaseDir2());
  Path linkViaDir2=new Path(testBaseDir2(),"linkToFile");
  wrapper.rename(dir1,dir2,Rename.OVERWRITE);
  // The stored (qualified) target is unchanged...
  assertEquals(fileQual,wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
  // ...but it no longer exists, so the link dangles.
  try {
    readFile(linkViaDir2);
    fail("The target should not exist");
  } catch ( FileNotFoundException x) {
    // expected
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/** Two symlinks pointing at each other form a cycle; reading must not hang. */
@Test(timeout=10000)
public void testRecursiveLinks() throws IOException {
  Path first = new Path(testBaseDir1() + "/link1");
  Path second = new Path(testBaseDir1() + "/link2");
  wrapper.createSymlink(first, second, false);
  wrapper.createSymlink(second, first, false);
  try {
    readFile(first);
    fail("Read recursive link");
  } catch (FileNotFoundException f) {
    // local FS reports the dangling resolution this way
  } catch (IOException x) {
    assertEquals("Possible cyclic loop while following symbolic link " +
        first.toString(), x.getMessage());
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a symlink onto the file it points to must fail with
 * FileAlreadyExistsException — both without and with Rename.OVERWRITE —
 * and must leave the file, the link, and the link target untouched.
 * Skipped for the local "file" scheme.
 */
@Test(timeout=10000) public void testRenameSymlinkToFileItLinksTo() throws IOException {
  if ("file".equals(getScheme())) {
    return;
  }
  Path file=new Path(testBaseDir1(),"file");
  Path link=new Path(testBaseDir1(),"linkToFile");
  createAndWriteFile(file);
  wrapper.createSymlink(file,link,false);
  // Plain rename onto the target must be rejected.
  try {
    wrapper.rename(link,file);
    fail("Renamed symlink to its target");
  } catch ( IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  // Nothing changed: file still a file, link still a symlink to it.
  assertTrue(wrapper.isFile(file));
  assertTrue(wrapper.exists(link));
  assertTrue(wrapper.isSymlink(link));
  assertEquals(file,wrapper.getLinkTarget(link));
  // Even OVERWRITE may not replace the target with the link.
  try {
    wrapper.rename(link,file,Rename.OVERWRITE);
    fail("Renamed symlink to its target");
  } catch ( IOException e) {
    assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
  }
  assertTrue(wrapper.isFile(file));
  assertTrue(wrapper.exists(link));
  assertTrue(wrapper.isSymlink(link));
  assertEquals(file,wrapper.getLinkTarget(link));
}

BranchVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Stat of a link to a file: link-status reports a symlink (not a dir),
 * following-status reports a file, and getLinkTarget returns the target.
 * For non-local schemes the followed status must equal the target's own
 * status, while the link-status path keeps the link's name.
 * Skipped when symlinks are merely emulated on Windows.
 */
@Test(timeout=10000) public void testStatLinkToFile() throws IOException {
  assumeTrue(!emulatingSymlinksOnWindows());
  Path file=new Path(testBaseDir1() + "/file");
  Path linkToFile=new Path(testBaseDir1() + "/linkToFile");
  createAndWriteFile(file);
  wrapper.createSymlink(file,linkToFile,false);
  assertFalse(wrapper.getFileLinkStatus(linkToFile).isDirectory());
  assertTrue(wrapper.isSymlink(linkToFile));
  assertTrue(wrapper.isFile(linkToFile));
  assertFalse(wrapper.isDir(linkToFile));
  assertEquals(file,wrapper.getLinkTarget(linkToFile));
  if (!"file".equals(getScheme())) {
    // Following the link yields the target's status with the target's path;
    // not following keeps the link's own path.
    assertEquals(wrapper.getFileStatus(file),wrapper.getFileStatus(linkToFile));
    assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(linkToFile).getPath());
    assertEquals(wrapper.makeQualified(linkToFile),wrapper.getFileLinkStatus(linkToFile).getPath());
  }
}

InternalCallVerifier EqualityVerifier 
/** A link whose target is "dir/.." behaves like a link to the parent dir. */
@Test(timeout=10000)
public void testCreateLinkToDotDot() throws IOException {
  Path file = new Path(testBaseDir1(), "test/file");
  Path dotDot = new Path(testBaseDir1(), "test/..");
  Path linkToDir = new Path(testBaseDir2(), "linkToDir");
  Path fileViaLink = new Path(linkToDir, "test/file");
  // "test/.." collapses to the base dir itself.
  assertEquals(new Path(testBaseDir1()), dotDot);
  createAndWriteFile(file);
  wrapper.createSymlink(dotDot, linkToDir, false);
  readFile(fileViaLink);
  assertEquals(fileSize, wrapper.getFileStatus(fileViaLink).getLen());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A directory created through a symlinked path is a real directory, lists
 * empty via both the array and iterator APIs, and deleting it through the
 * link removes the actual directory.
 *
 * Fix: parameterize the raw RemoteIterator as RemoteIterator&lt;FileStatus&gt;
 * for type safety and consistency with the rest of the file.
 */
@Test(timeout=10000)
public void testAccessDirViaSymlink() throws IOException {
  Path baseDir = new Path(testBaseDir1());
  Path dir = new Path(testBaseDir1(), "dir");
  Path linkToDir = new Path(testBaseDir2(), "linkToDir");
  Path dirViaLink = new Path(linkToDir, "dir");
  wrapper.createSymlink(baseDir, linkToDir, false);
  wrapper.mkdir(dirViaLink, FileContext.DEFAULT_PERM, true);
  assertTrue(wrapper.getFileStatus(dirViaLink).isDirectory());
  FileStatus[] stats = wrapper.listStatus(dirViaLink);
  assertEquals(0, stats.length);
  RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(dirViaLink);
  assertFalse(statsItor.hasNext());
  // Deleting through the link removes the real directory too.
  wrapper.delete(dirViaLink, false);
  assertFalse(wrapper.exists(dirViaLink));
  assertFalse(wrapper.exists(dir));
}

InternalCallVerifier EqualityVerifier 
/**
 * A file created through a link whose target is fully qualified behaves
 * identically whether accessed directly or via the link.
 */
@Test(timeout=10000)
public void testAccessFileViaInterSymlinkQualTarget() throws IOException {
  Path baseDir = new Path(testBaseDir1());
  Path file = new Path(testBaseDir1(), "file");
  Path linkToDir = new Path(testBaseDir2(), "linkToDir");
  Path fileViaLink = new Path(linkToDir, "file");
  wrapper.createSymlink(wrapper.makeQualified(baseDir), linkToDir, false);
  createAndWriteFile(fileViaLink);
  // Neither path is itself a symlink, so status == link-status for both.
  assertEquals(wrapper.getFileStatus(file), wrapper.getFileLinkStatus(file));
  assertEquals(wrapper.getFileStatus(fileViaLink),
      wrapper.getFileLinkStatus(fileViaLink));
  readFile(fileViaLink);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Stat of a link to a directory: followed status is a directory, link
 * status is a symlink, and getLinkTarget returns the directory.
 */
@Test(timeout=10000)
public void testStatLinkToDir() throws IOException {
  Path targetDir = new Path(testBaseDir1());
  Path linkToDir = new Path(testBaseDir1() + "/linkToDir");
  wrapper.createSymlink(targetDir, linkToDir, false);
  // Following the link: it is a directory, not a symlink.
  assertFalse(wrapper.getFileStatus(linkToDir).isSymlink());
  assertTrue(wrapper.isDir(linkToDir));
  // Not following: it is a symlink, not a directory.
  assertFalse(wrapper.getFileLinkStatus(linkToDir).isDirectory());
  assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
  assertFalse(wrapper.isFile(linkToDir));
  assertTrue(wrapper.isDir(linkToDir));
  assertEquals(targetDir, wrapper.getLinkTarget(linkToDir));
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A symlink created with absolute (scheme-less) paths stores the absolute
 * target. After renaming the directory containing the target, the link
 * still reports the original qualified target but dangles, so reading it
 * must fail.
 */
@Test(timeout=10000) public void testCreateLinkUsingAbsPaths() throws IOException {
  Path fileAbs=new Path(testBaseDir1() + "/file");
  Path linkAbs=new Path(testBaseDir1() + "/linkToFile");
  Path schemeAuth=new Path(testURI().toString());
  Path fileQual=new Path(schemeAuth,testBaseDir1() + "/file");
  createAndWriteFile(fileAbs);
  wrapper.createSymlink(fileAbs,linkAbs,false);
  checkLink(linkAbs,fileAbs,fileQual);
  // Move the whole directory out from under the link target.
  Path dir1=new Path(testBaseDir1());
  Path dir2=new Path(testBaseDir2());
  Path linkViaDir2=new Path(testBaseDir2(),"linkToFile");
  wrapper.rename(dir1,dir2,Rename.OVERWRITE);
  // The stored target is unchanged...
  assertEquals(fileQual,wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
  // ...but it no longer exists, so the link dangles.
  try {
    readFile(linkViaDir2);
    fail("The target should not exist");
  } catch ( FileNotFoundException x) {
    // expected
  }
}

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * A file created through a link whose target is a relative path must stat
 * identically whether reached directly or via the link. Skipped for the
 * local "file" scheme.
 */
@Test(timeout=10000) public void testAccessFileViaInterSymlinkRelTarget() throws IOException {
  assumeTrue(!"file".equals(getScheme()));
  Path dir=new Path(testBaseDir1(),"dir");
  Path file=new Path(dir,"file");
  Path linkToDir=new Path(testBaseDir1(),"linkToDir");
  Path fileViaLink=new Path(linkToDir,"file");
  wrapper.mkdir(dir,FileContext.DEFAULT_PERM,false);
  // Relative target: resolved against the link's parent directory.
  wrapper.createSymlink(new Path("dir"),linkToDir,false);
  createAndWriteFile(fileViaLink);
  assertEquals(wrapper.makeQualified(file),wrapper.getFileStatus(file).getPath());
  // The file itself is not a symlink, so status == link-status everywhere.
  assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file));
  assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink));
  // NOTE(review): this compares the via-link status against the direct
  // file's link-status — presumably intentional (all four are equal here).
  assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(file));
}

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * A symlink created with a relative target resolves to the same status as
 * its target file, while link-status preserves the link's own path.
 * Skipped on LocalFs ("file" scheme).
 */
@Test(timeout=10000)
public void testStatRelLinkToFile() throws IOException {
  assumeTrue(!"file".equals(getScheme()));
  Path file = new Path(testBaseDir1(), "file");
  Path linkToFile = new Path(testBaseDir1(), "linkToFile");
  createAndWriteFile(file);
  wrapper.createSymlink(new Path("file"), linkToFile, false);
  // Resolved status matches the target file exactly.
  assertEquals(wrapper.getFileStatus(file), wrapper.getFileStatus(linkToFile));
  assertEquals(wrapper.makeQualified(file),
      wrapper.getFileStatus(linkToFile).getPath());
  // Link status keeps the link's own qualified path.
  assertEquals(wrapper.makeQualified(linkToFile),
      wrapper.getFileLinkStatus(linkToFile).getPath());
}

InternalCallVerifier EqualityVerifier 
/**
 * Create a symlink using paths relative to the working directory, then
 * rename the directory containing both link and target; since the stored
 * target is relative, it still resolves under the renamed directory.
 */
@Test(timeout=10000)
public void testCreateLinkUsingRelPaths() throws IOException {
  Path fileAbs = new Path(testBaseDir1(), "file");
  Path linkAbs = new Path(testBaseDir1(), "linkToFile");
  Path schemeAuth = new Path(testURI().toString());
  Path fileQual = new Path(schemeAuth, testBaseDir1() + "/file");
  createAndWriteFile(fileAbs);
  wrapper.setWorkingDirectory(new Path(testBaseDir1()));
  wrapper.createSymlink(new Path("file"), new Path("linkToFile"), false);
  checkLink(linkAbs, new Path("file"), fileQual);
  // Rename the directory that holds both the link and its target.
  Path dir1 = new Path(testBaseDir1());
  Path dir2 = new Path(testBaseDir2());
  Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
  Path fileViaDir2 = new Path(schemeAuth, testBaseDir2() + "/file");
  wrapper.rename(dir1, dir2, Rename.OVERWRITE);
  // Sanity-check the renamed directory is listable (result intentionally
  // unused; the original kept a dead local here).
  wrapper.listStatus(dir2);
  // The relative target now resolves against dir2.
  assertEquals(fileViaDir2,
      wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
  readFile(linkViaDir2);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * mkdir on a path already occupied by a symlink must fail with
 * FileAlreadyExistsException (LocalFs may surface a plain IOException).
 */
@Test(timeout=10000)
public void testMkdirExistingLink() throws IOException {
  Path file = new Path(testBaseDir1() + "/targetFile");
  createAndWriteFile(file);
  Path dir = new Path(testBaseDir1() + "/link");
  wrapper.createSymlink(file, dir, false);
  try {
    wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
    fail("Created a dir where a symlink exists");
  } catch (FileAlreadyExistsException e) {
    // Expected on HDFS.
  } catch (IOException e) {
    // LocalFs does not throw FileAlreadyExistsException here.
    assertEquals("file", getScheme());
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Renaming a symlink onto an existing file requires Rename.OVERWRITE;
 * afterwards the destination path is the link itself, still pointing at
 * its original target (file2).
 */
@Test(timeout=10000) public void testRenameSymlinkToExistingFile() throws IOException { Path file1=new Path(testBaseDir1(),"file"); Path file2=new Path(testBaseDir1(),"someFile"); Path link=new Path(testBaseDir1(),"linkToFile"); createAndWriteFile(file1); createAndWriteFile(file2); wrapper.createSymlink(file2,link,false); try { wrapper.rename(link,file1); fail("Renamed w/o passing overwrite"); } catch ( IOException e) { assertTrue(unwrapException(e) instanceof FileAlreadyExistsException); } wrapper.rename(link,file1,Rename.OVERWRITE); assertFalse(wrapper.exists(link)); if (!emulatingSymlinksOnWindows()) { assertTrue(wrapper.getFileLinkStatus(file1).isSymlink()); assertEquals(file2,wrapper.getLinkTarget(file1)); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * For non-links, getFileLinkStatus equals getFileStatus, and
 * getLinkTarget must throw an IOException whose message names the
 * qualified path as "not a symbolic link".
 */
@Test(timeout=10000) public void testLinkStatusAndTargetWithNonLink() throws IOException { Path schemeAuth=new Path(testURI().toString()); Path dir=new Path(testBaseDir1()); Path dirQual=new Path(schemeAuth,dir.toString()); Path file=new Path(testBaseDir1(),"file"); Path fileQual=new Path(schemeAuth,file.toString()); createAndWriteFile(file); assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file)); assertEquals(wrapper.getFileStatus(dir),wrapper.getFileLinkStatus(dir)); try { wrapper.getLinkTarget(file); fail("Get link target on non-link should throw an IOException"); } catch ( IOException x) { assertEquals("Path " + fileQual + " is not a symbolic link",x.getMessage()); } try { wrapper.getLinkTarget(dir); fail("Get link target on non-link should throw an IOException"); } catch ( IOException x) { assertEquals("Path " + dirQual + " is not a symbolic link",x.getMessage()); } }

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A link target with an authority but no scheme ("//auth/path") is stored
 * verbatim, but dereferencing it fails because no filesystem matches a
 * null scheme. Skipped on LocalFs ("file" scheme).
 */
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath2() throws IOException { Path link=new Path(testBaseDir1(),"linkToFile"); Path fileWoScheme=new Path("//" + testURI().getAuthority() + testBaseDir1()+ "/file"); if ("file".equals(getScheme())) { return; } wrapper.createSymlink(fileWoScheme,link,false); assertEquals(fileWoScheme,wrapper.getLinkTarget(link)); assertEquals(fileWoScheme.toString(),wrapper.getFileLinkStatus(link).getSymlink().toString()); try { readFile(link); fail("Accessed a file with w/o scheme"); } catch ( IOException e) { if (wrapper instanceof FileContextTestWrapper) { assertEquals("No AbstractFileSystem for scheme: null",e.getMessage()); } else if (wrapper instanceof FileSystemTestWrapper) { assertEquals("No FileSystem for scheme: null",e.getMessage()); } } }

InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Create a symlink whose target path itself passes through another
 * symlink; resolution, length and getLinkTarget must all work. Skipped
 * when symlinks are only emulated on Windows.
 */
@Test(timeout=10000) public void testCreateLinkViaLink() throws IOException { assumeTrue(!emulatingSymlinksOnWindows()); Path dir1=new Path(testBaseDir1()); Path file=new Path(testBaseDir1(),"file"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); Path fileViaLink=new Path(linkToDir,"file"); Path linkToFile=new Path(linkToDir,"linkToFile"); createAndWriteFile(file); wrapper.createSymlink(dir1,linkToDir,false); wrapper.createSymlink(fileViaLink,linkToFile,false); assertTrue(wrapper.isFile(linkToFile)); assertTrue(wrapper.getFileLinkStatus(linkToFile).isSymlink()); readFile(linkToFile); assertEquals(fileSize,wrapper.getFileStatus(linkToFile).getLen()); assertEquals(fileViaLink,wrapper.getLinkTarget(linkToFile)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Full lifecycle (create/read/append/rename/delete) of a file accessed
 * through an intermediate symlink with an absolute target; stats, block
 * locations and checksums must match the direct path.
 */
@Test(timeout=10000) public void testAccessFileViaInterSymlinkAbsTarget() throws IOException { Path baseDir=new Path(testBaseDir1()); Path file=new Path(testBaseDir1(),"file"); Path fileNew=new Path(baseDir,"fileNew"); Path linkToDir=new Path(testBaseDir2(),"linkToDir"); Path fileViaLink=new Path(linkToDir,"file"); Path fileNewViaLink=new Path(linkToDir,"fileNew"); wrapper.createSymlink(baseDir,linkToDir,false); createAndWriteFile(fileViaLink); assertTrue(wrapper.exists(fileViaLink)); assertTrue(wrapper.isFile(fileViaLink)); assertFalse(wrapper.isDir(fileViaLink)); assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink()); assertFalse(wrapper.isDir(fileViaLink)); assertEquals(wrapper.getFileStatus(file),wrapper.getFileLinkStatus(file)); assertEquals(wrapper.getFileStatus(fileViaLink),wrapper.getFileLinkStatus(fileViaLink)); readFile(fileViaLink); appendToFile(fileViaLink); wrapper.rename(fileViaLink,fileNewViaLink); assertFalse(wrapper.exists(fileViaLink)); assertTrue(wrapper.exists(fileNewViaLink)); readFile(fileNewViaLink); assertEquals(wrapper.getFileBlockLocations(fileNew,0,1).length,wrapper.getFileBlockLocations(fileNewViaLink,0,1).length); assertEquals(wrapper.getFileChecksum(fileNew),wrapper.getFileChecksum(fileNewViaLink)); wrapper.delete(fileNewViaLink,true); assertFalse(wrapper.exists(fileNewViaLink)); }

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * A scheme-only target (scheme but no authority) round-trips through
 * createSymlink/getLinkTarget unchanged, but dereferencing it fails:
 * FileContext throws a RuntimeException, FileSystem a
 * FileNotFoundException. Skipped on LocalFs ("file" scheme).
 */
@Test(timeout=10000) public void testCreateLinkUsingPartQualPath1() throws IOException { assumeTrue(!"file".equals(getScheme())); Path schemeAuth=new Path(testURI().toString()); Path fileWoHost=new Path(getScheme() + "://" + testBaseDir1()+ "/file"); Path link=new Path(testBaseDir1() + "/linkToFile"); Path linkQual=new Path(schemeAuth,testBaseDir1() + "/linkToFile"); FSTestWrapper localWrapper=wrapper.getLocalFSWrapper(); wrapper.createSymlink(fileWoHost,link,false); assertEquals(fileWoHost,wrapper.getLinkTarget(linkQual)); assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(link).getSymlink().toString()); assertEquals(fileWoHost.toString(),wrapper.getFileLinkStatus(linkQual).getSymlink().toString()); if (wrapper instanceof FileContextTestWrapper) { assertEquals(fileWoHost.toString(),localWrapper.getFileLinkStatus(linkQual).getSymlink().toString()); } try { readFile(link); fail("DFS requires URIs with schemes have an authority"); } catch ( java.lang.RuntimeException e) { assertTrue(wrapper instanceof FileContextTestWrapper); } catch ( FileNotFoundException e) { assertTrue(wrapper instanceof FileSystemTestWrapper); GenericTestUtils.assertExceptionContains("File does not exist: /test1/file",e); } }

Class: org.apache.hadoop.fs.TestChecksumFileSystem

EqualityVerifier 
/**
 * Checksum-file length: an 8-byte header plus 4 bytes for every started
 * chunk of bytesPerChecksum data bytes.
 */
@Test
public void testgetChecksumLength() throws Exception {
  assertEquals(8, ChecksumFileSystem.getChecksumLength(0L, 512));
  assertEquals(12, ChecksumFileSystem.getChecksumLength(1L, 512));
  assertEquals(12, ChecksumFileSystem.getChecksumLength(512L, 512));
  assertEquals(16, ChecksumFileSystem.getChecksumLength(513L, 512));
  assertEquals(16, ChecksumFileSystem.getChecksumLength(1023L, 512));
  assertEquals(16, ChecksumFileSystem.getChecksumLength(1024L, 512));
  assertEquals(408, ChecksumFileSystem.getChecksumLength(100L, 1));
  // Must not overflow int arithmetic for very large files.
  assertEquals(4000000000008L,
      ChecksumFileSystem.getChecksumLength(10000000000000L, 10));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Rewrite a file through the raw filesystem so its checksum file goes
 * stale: a verified read must raise ChecksumException, while disabling
 * verification must return the new contents.
 */
@Test public void testCorruptedChecksum() throws Exception { Path testPath=new Path(TEST_ROOT_DIR,"testCorruptChecksum"); Path checksumPath=localFs.getChecksumFile(testPath); FSDataOutputStream out=localFs.create(testPath,true); out.write("testing 1 2 3".getBytes()); out.close(); assertTrue(localFs.exists(checksumPath)); FileStatus stat=localFs.getFileStatus(checksumPath); out=localFs.getRawFileSystem().create(testPath,true); out.write("testing stale checksum".getBytes()); out.close(); assertTrue(localFs.exists(checksumPath)); assertEquals(stat,localFs.getFileStatus(checksumPath)); Exception e=null; try { localFs.setVerifyChecksum(true); readFile(localFs,testPath,1024); } catch ( ChecksumException ce) { e=ce; } finally { assertNotNull("got checksum error",e); } localFs.setVerifyChecksum(false); String str=readFile(localFs,testPath,1024); assertEquals("testing stale checksum",str); }

Class: org.apache.hadoop.fs.TestCommandFormat

InternalCallVerifier EqualityVerifier 
/**
 * Parsing with an explicit start index must skip earlier arguments:
 * options before the index are ignored, and option scanning stops at the
 * first non-option argument.
 */
@Test
public void testOldArgsWithIndex() {
  String[] arrayArgs = new String[] { "ignore", "-a", "b", "-c" };
  {
    // Index 0: "ignore" is not an option, so nothing is consumed.
    CommandFormat cf = new CommandFormat(0, 9, "a", "c");
    List<String> parsedArgs = cf.parse(arrayArgs, 0);
    assertEquals(setOf(), cf.getOpts());
    assertEquals(listOf("ignore", "-a", "b", "-c"), parsedArgs);
  }
  {
    // Index 1: "-a" is consumed; "-c" after non-option "b" is not.
    CommandFormat cf = new CommandFormat(0, 9, "a", "c");
    List<String> parsedArgs = cf.parse(arrayArgs, 1);
    assertEquals(setOf("a"), cf.getOpts());
    assertEquals(listOf("b", "-c"), parsedArgs);
  }
  {
    // Index 2: starts at non-option "b", so no options are consumed.
    CommandFormat cf = new CommandFormat(0, 9, "a", "c");
    List<String> parsedArgs = cf.parse(arrayArgs, 2);
    assertEquals(setOf(), cf.getOpts());
    assertEquals(listOf("b", "-c"), parsedArgs);
  }
}

Class: org.apache.hadoop.fs.TestContentSummary

EqualityVerifier 
/** The summary header without quota columns lists dirs, files, bytes. */
@Test
public void testGetHeaderNoQuota() {
  String expected = " directories files bytes ";
  assertEquals(expected, ContentSummary.getHeader(false));
}

EqualityVerifier 
/** toString(true) on a quota-less summary prints "none"/"inf" fields. */
@Test
public void testToStringNoQuota() {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  ContentSummary summary =
      new ContentSummary(length, fileCount, directoryCount);
  String expected = " none inf none" + " inf 33333 22222 11111 ";
  assertEquals(expected, summary.toString(true));
}

EqualityVerifier 
/** toString(false) omits quota columns even when quotas are set. */
@Test
public void testToStringNoShowQuota() {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  long quota = 44444;
  long spaceConsumed = 55555;
  long spaceQuota = 66665;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  String expected = " 33333 22222 11111 ";
  assertEquals(expected, summary.toString(false));
}

InternalCallVerifier EqualityVerifier 
/** readFields must populate all six counters in serialized order. */
@Test
public void testReadFields() throws IOException {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  long quota = 44444;
  long spaceConsumed = 55555;
  long spaceQuota = 66666;
  ContentSummary summary = new ContentSummary();
  DataInput in = mock(DataInput.class);
  // Stub successive readLong() calls in the on-the-wire field order.
  when(in.readLong()).thenReturn(length).thenReturn(fileCount)
      .thenReturn(directoryCount).thenReturn(quota)
      .thenReturn(spaceConsumed).thenReturn(spaceQuota);
  summary.readFields(in);
  assertEquals("getLength", length, summary.getLength());
  assertEquals("getFileCount", fileCount, summary.getFileCount());
  assertEquals("getDirectoryCount", directoryCount,
      summary.getDirectoryCount());
  assertEquals("getQuota", quota, summary.getQuota());
  assertEquals("getSpaceConsumed", spaceConsumed,
      summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", spaceQuota, summary.getSpaceQuota());
}

EqualityVerifier 
/** Human-readable output without quota columns. */
@Test
public void testToStringHumanNoShowQuota() {
  long length = Long.MAX_VALUE;
  long fileCount = 222222222;
  long directoryCount = 33333;
  long quota = 222256578;
  long spaceConsumed = 55555;
  long spaceQuota = Long.MAX_VALUE;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  String expected = " 32.6 K 211.9 M 8.0 E ";
  assertEquals(expected, summary.toString(false, true));
}

InternalCallVerifier EqualityVerifier 
/** The 3-arg constructor implies no quotas and spaceConsumed == length. */
@Test
public void testConstructorNoQuota() {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  ContentSummary summary =
      new ContentSummary(length, fileCount, directoryCount);
  assertEquals("getLength", length, summary.getLength());
  assertEquals("getFileCount", fileCount, summary.getFileCount());
  assertEquals("getDirectoryCount", directoryCount,
      summary.getDirectoryCount());
  assertEquals("getQuota", -1, summary.getQuota());
  assertEquals("getSpaceConsumed", length, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", -1, summary.getSpaceQuota());
}

EqualityVerifier 
/** The summary header with quota columns prepended. */
@Test
public void testGetHeaderWithQuota() {
  String expected = " name quota rem name quota space quota "
      + "rem space quota directories files bytes ";
  assertEquals(expected, ContentSummary.getHeader(true));
}

InternalCallVerifier EqualityVerifier 
/** The 6-arg constructor stores every field verbatim. */
@Test
public void testConstructorWithQuota() {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  long quota = 44444;
  long spaceConsumed = 55555;
  long spaceQuota = 66666;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  assertEquals("getLength", length, summary.getLength());
  assertEquals("getFileCount", fileCount, summary.getFileCount());
  assertEquals("getDirectoryCount", directoryCount,
      summary.getDirectoryCount());
  assertEquals("getQuota", quota, summary.getQuota());
  assertEquals("getSpaceConsumed", spaceConsumed,
      summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", spaceQuota, summary.getSpaceQuota());
}

EqualityVerifier 
/** Default toString shows quota columns; remaining may go negative. */
@Test
public void testToString() {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  long quota = 44444;
  long spaceConsumed = 55555;
  long spaceQuota = 66665;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  String expected = " 44444 -11111 66665" + " 11110 33333 22222 11111 ";
  assertEquals(expected, summary.toString());
}

EqualityVerifier 
/** Human-readable output with quota columns. */
@Test
public void testToStringHumanWithQuota() {
  long length = Long.MAX_VALUE;
  long fileCount = 222222222;
  long directoryCount = 33333;
  long quota = 222256578;
  long spaceConsumed = 1073741825;
  long spaceQuota = 1;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  String expected = " 212.0 M 1023 1 " + " -1 G 32.6 K 211.9 M 8.0 E ";
  assertEquals(expected, summary.toString(true, true));
}

InternalCallVerifier EqualityVerifier 
/** The no-arg constructor zeroes every counter and quota. */
@Test
public void testConstructorEmpty() {
  ContentSummary summary = new ContentSummary();
  assertEquals("getLength", 0, summary.getLength());
  assertEquals("getFileCount", 0, summary.getFileCount());
  assertEquals("getDirectoryCount", 0, summary.getDirectoryCount());
  assertEquals("getQuota", 0, summary.getQuota());
  assertEquals("getSpaceConsumed", 0, summary.getSpaceConsumed());
  assertEquals("getSpaceQuota", 0, summary.getSpaceQuota());
}

EqualityVerifier 
/** toString(true) prints quota columns for a summary with quotas set. */
@Test
public void testToStringWithQuota() {
  long length = 11111;
  long fileCount = 22222;
  long directoryCount = 33333;
  long quota = 44444;
  long spaceConsumed = 55555;
  long spaceQuota = 66665;
  ContentSummary summary = new ContentSummary(length, fileCount,
      directoryCount, quota, spaceConsumed, spaceQuota);
  String expected = " 44444 -11111 66665 11110" + " 33333 22222 11111 ";
  assertEquals(expected, summary.toString(true));
}

Class: org.apache.hadoop.fs.TestDFVariations

InternalCallVerifier EqualityVerifier 
/** On Windows DF reports the drive letter; elsewhere the fixture's device. */
@Test(timeout=5000)
public void testFileSystem() throws Exception {
  XXDF df = new XXDF();
  String expectedFileSystem =
      Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/dev/sda3";
  assertEquals("Invalid filesystem", expectedFileSystem, df.getFilesystem());
}

InternalCallVerifier EqualityVerifier 
/** On Windows the mount point is the drive letter; elsewhere /foo/bar. */
@Test(timeout=5000)
public void testMount() throws Exception {
  XXDF df = new XXDF();
  String expectedMount =
      Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar";
  assertEquals("Invalid mount point", expectedMount, df.getMount());
}

Class: org.apache.hadoop.fs.TestDelegationTokenRenewer

InternalCallVerifier EqualityVerifier 
/**
 * The renewer holds only a weak reference to the filesystem: once the fs
 * becomes unreachable and is garbage-collected, its renew action must
 * drop out of the queue and renewals must stop.
 * NOTE(review): relies on System.gc() actually collecting the mock —
 * potentially flaky under some JVMs; confirm before tightening timings.
 */
@Test public void testStopRenewalWhenFsGone() throws IOException, InterruptedException { Configuration conf=mock(Configuration.class); Token token=mock(Token.class); doReturn(new Text("myservice")).when(token).getService(); doAnswer(new Answer(){ public Long answer( InvocationOnMock invocation){ return Time.now() + RENEW_CYCLE; } } ).when(token).renew(any(Configuration.class)); RenewableFileSystem fs=mock(RenewableFileSystem.class); doReturn(conf).when(fs).getConf(); doReturn(token).when(fs).getRenewToken(); renewer.addRenewAction(fs); assertEquals(1,renewer.getRenewQueueLength()); Thread.sleep(RENEW_CYCLE); verify(token,atLeast(1)).renew(eq(conf)); verify(token,atMost(2)).renew(eq(conf)); fs=null; System.gc(); System.gc(); System.gc(); Thread.sleep(RENEW_CYCLE); verify(token,atLeast(1)).renew(eq(conf)); verify(token,atMost(2)).renew(eq(conf)); assertEquals(0,renewer.getRenewQueueLength()); }

InternalCallVerifier EqualityVerifier 
@Test public void testGetNewTokenOnRenewFailure() throws IOException, InterruptedException { Text service=new Text("myservice"); Configuration conf=mock(Configuration.class); final Token token1=mock(Token.class); doReturn(service).when(token1).getService(); doThrow(new IOException("boom")).when(token1).renew(eq(conf)); final Token token2=mock(Token.class); doReturn(service).when(token2).getService(); doAnswer(new Answer(){ public Long answer( InvocationOnMock invocation){ return Time.now() + RENEW_CYCLE; } } ).when(token2).renew(eq(conf)); RenewableFileSystem fs=mock(RenewableFileSystem.class); doReturn(conf).when(fs).getConf(); doReturn(token1).doReturn(token2).when(fs).getRenewToken(); doReturn(token2).when(fs).getDelegationToken(null); doAnswer(new Answer[]>(){ public Token[] answer( InvocationOnMock invocation){ return new Token[]{token2}; } } ).when(fs).addDelegationTokens(null,null); renewer.addRenewAction(fs); assertEquals(1,renewer.getRenewQueueLength()); Thread.sleep(RENEW_CYCLE); verify(fs).getRenewToken(); verify(token1,atLeast(1)).renew(eq(conf)); verify(token1,atMost(2)).renew(eq(conf)); verify(fs).addDelegationTokens(null,null); verify(fs).setDelegationToken(eq(token2)); assertEquals(1,renewer.getRenewQueueLength()); renewer.removeRenewAction(fs); verify(token2).cancel(eq(conf)); assertEquals(0,renewer.getRenewQueueLength()); }

InternalCallVerifier EqualityVerifier 
/**
 * Adding a renew action renews the token on schedule without cancelling;
 * removing it cancels the token and never fetches a replacement.
 */
@SuppressWarnings("unchecked") @Test public void testAddRemoveRenewAction() throws IOException, InterruptedException { Text service=new Text("myservice"); Configuration conf=mock(Configuration.class); Token token=mock(Token.class); doReturn(service).when(token).getService(); doAnswer(new Answer(){ public Long answer( InvocationOnMock invocation){ return Time.now() + RENEW_CYCLE; } } ).when(token).renew(any(Configuration.class)); RenewableFileSystem fs=mock(RenewableFileSystem.class); doReturn(conf).when(fs).getConf(); doReturn(token).when(fs).getRenewToken(); renewer.addRenewAction(fs); assertEquals("FileSystem not added to DelegationTokenRenewer",1,renewer.getRenewQueueLength()); Thread.sleep(RENEW_CYCLE * 2); verify(token,atLeast(2)).renew(eq(conf)); verify(token,atMost(3)).renew(eq(conf)); verify(token,never()).cancel(any(Configuration.class)); renewer.removeRenewAction(fs); verify(token).cancel(eq(conf)); verify(fs,never()).getDelegationToken(null); verify(fs,never()).setDelegationToken(any(Token.class)); assertEquals("FileSystem not removed from DelegationTokenRenewer",0,renewer.getRenewQueueLength()); }

InternalCallVerifier EqualityVerifier 
/**
 * Registering and removing renew actions for two filesystems must not
 * deadlock (enforced by the 4s timeout) and must cancel each token
 * exactly once. Renewal times are far in the future so no renew fires.
 */
@Test(timeout=4000) public void testMultipleTokensDoNotDeadlock() throws IOException, InterruptedException { Configuration conf=mock(Configuration.class); FileSystem fs=mock(FileSystem.class); doReturn(conf).when(fs).getConf(); long distantFuture=Time.now() + 3600 * 1000; Token token1=mock(Token.class); doReturn(new Text("myservice1")).when(token1).getService(); doReturn(distantFuture).when(token1).renew(eq(conf)); Token token2=mock(Token.class); doReturn(new Text("myservice2")).when(token2).getService(); doReturn(distantFuture).when(token2).renew(eq(conf)); RenewableFileSystem fs1=mock(RenewableFileSystem.class); doReturn(conf).when(fs1).getConf(); doReturn(token1).when(fs1).getRenewToken(); RenewableFileSystem fs2=mock(RenewableFileSystem.class); doReturn(conf).when(fs2).getConf(); doReturn(token2).when(fs2).getRenewToken(); renewer.addRenewAction(fs1); renewer.addRenewAction(fs2); assertEquals(2,renewer.getRenewQueueLength()); renewer.removeRenewAction(fs1); assertEquals(1,renewer.getRenewQueueLength()); renewer.removeRenewAction(fs2); assertEquals(0,renewer.getRenewQueueLength()); verify(token1).cancel(eq(conf)); verify(token2).cancel(eq(conf)); }

InternalCallVerifier EqualityVerifier 
/** A filesystem that exposes no renew token must not be queued. */
@Test
public void testAddRenewActionWithNoToken() throws IOException, InterruptedException {
  Configuration conf = mock(Configuration.class);
  RenewableFileSystem fs = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs).getConf();
  doReturn(null).when(fs).getRenewToken();
  renewer.addRenewAction(fs);
  // The token was queried, but nothing was scheduled.
  verify(fs).getRenewToken();
  assertEquals(0, renewer.getRenewQueueLength());
}

Class: org.apache.hadoop.fs.TestEnhancedByteBufferAccess

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A zero-copy read near a block boundary is truncated to the bytes
 * mmappable from the current position (4096 here), not the requested
 * length, and both total and zero-copy read statistics are updated.
 */
@Test public void testShortZeroCopyReads() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); FSDataInputStream fsIn=null; final int TEST_FILE_LENGTH=12345; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L); try { DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); } catch ( InterruptedException e) { Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e); } catch ( TimeoutException e) { Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e); } fsIn=fs.open(TEST_PATH); byte original[]=new byte[TEST_FILE_LENGTH]; IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH); fsIn.close(); fsIn=fs.open(TEST_PATH); HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn; ByteBuffer result=dfsIn.read(null,8192,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result)); dfsIn.releaseBuffer(result); result=dfsIn.read(null,4097,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); Assert.assertArrayEquals(Arrays.copyOfRange(original,4096,8192),byteBufferToArray(result)); dfsIn.releaseBuffer(result); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * With client mmap disabled, a zero-copy read must throw
 * UnsupportedOperationException; with mmap enabled but a zero-sized mmap
 * cache, reads still succeed and reading at EOF returns null.
 */
@Test public void testClientMmapDisable() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); conf.setBoolean(DFS_CLIENT_MMAP_ENABLED,false); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); final int TEST_FILE_LENGTH=16385; final int RANDOM_SEED=23453; final String CONTEXT="testClientMmapDisable"; FSDataInputStream fsIn=null; DistributedFileSystem fs=null; conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); fsIn=fs.open(TEST_PATH); try { fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.fail("expected zero-copy read to fail when client mmaps " + "were disabled."); } catch ( UnsupportedOperationException e) { } } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } fsIn=null; fs=null; cluster=null; try { conf.setBoolean(DFS_CLIENT_MMAP_ENABLED,true); conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE,0); conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT + ".1"); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); fsIn=fs.open(TEST_PATH); ByteBuffer buf=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); fsIn.releaseBuffer(buf); IOUtils.skipFully(fsIn,TEST_FILE_LENGTH - 1); buf=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(null,buf); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// NOTE(review): requires HDFS centralized caching; a zero-copy read
// without SKIP_CHECKSUMS only succeeds once the block is cached and
// mlocked by the datanode, and the replica anchor counts must track the
// cache directive and buffer lifetimes.
/** * Test that we can zero-copy read cached data even without disabling * checksums. */ @Test(timeout=120000) public void testZeroCopyReadOfCachedData() throws Exception { BlockReaderTestUtil.enableShortCircuitShmTracing(); BlockReaderTestUtil.enableBlockReaderFactoryTracing(); BlockReaderTestUtil.enableHdfsCachingTracing(); final int TEST_FILE_LENGTH=16385; final Path TEST_PATH=new Path("/a"); final int RANDOM_SEED=23453; HdfsConfiguration conf=initZeroCopyTest(); conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,false); final String CONTEXT="testZeroCopyReadOfCachedData"; conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT); conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,4096)); MiniDFSCluster cluster=null; ByteBuffer result=null, result2=null; cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); FsDatasetSpi fsd=cluster.getDataNodes().get(0).getFSDataset(); DistributedFileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,RANDOM_SEED); DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); byte original[]=DFSTestUtil.calculateFileContentsFromSeed(RANDOM_SEED,TEST_FILE_LENGTH); FSDataInputStream fsIn=fs.open(TEST_PATH); try { result=fsIn.read(null,TEST_FILE_LENGTH / 2,EnumSet.noneOf(ReadOption.class)); Assert.fail("expected UnsupportedOperationException"); } catch ( UnsupportedOperationException e) { } fs.addCachePool(new CachePoolInfo("pool1")); long directiveId=fs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(TEST_PATH).setReplication((short)1).setPool("pool1").build()); int numBlocks=(int)Math.ceil((double)TEST_FILE_LENGTH / BLOCK_SIZE); DFSTestUtil.verifyExpectedCacheUsage(DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,BLOCK_SIZE),numBlocks,cluster.getDataNodes().get(0).getFSDataset()); try { result=fsIn.read(null,TEST_FILE_LENGTH,EnumSet.noneOf(ReadOption.class)); } catch ( 
UnsupportedOperationException e) { Assert.fail("expected to be able to read cached file via zero-copy"); } Assert.assertArrayEquals(Arrays.copyOfRange(original,0,BLOCK_SIZE),byteBufferToArray(result)); FSDataInputStream fsIn2=fs.open(TEST_PATH); try { result2=fsIn2.read(null,TEST_FILE_LENGTH,EnumSet.noneOf(ReadOption.class)); } catch ( UnsupportedOperationException e) { Assert.fail("expected to be able to read cached file via zero-copy"); } Assert.assertArrayEquals(Arrays.copyOfRange(original,0,BLOCK_SIZE),byteBufferToArray(result2)); fsIn2.releaseBuffer(result2); fsIn2.close(); final ExtendedBlock firstBlock=DFSTestUtil.getFirstBlock(fs,TEST_PATH); final ShortCircuitCache cache=ClientContext.get(CONTEXT,new DFSClient.Conf(conf)).getShortCircuitCache(); waitForReplicaAnchorStatus(cache,firstBlock,true,true,1); fs.removeCacheDirective(directiveId); waitForReplicaAnchorStatus(cache,firstBlock,false,true,1); fsIn.releaseBuffer(result); waitForReplicaAnchorStatus(cache,firstBlock,false,false,1); DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd); fsIn.close(); fs.close(); cluster.shutdown(); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Basic zero-copy read: 4096 bytes read via mmap must match a normal
 * read of the same range and be counted in both total and zero-copy
 * read statistics.
 */
@Test public void testZeroCopyReads() throws Exception { HdfsConfiguration conf=initZeroCopyTest(); MiniDFSCluster cluster=null; final Path TEST_PATH=new Path("/a"); FSDataInputStream fsIn=null; final int TEST_FILE_LENGTH=12345; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L); try { DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1); } catch ( InterruptedException e) { Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e); } catch ( TimeoutException e) { Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e); } fsIn=fs.open(TEST_PATH); byte original[]=new byte[TEST_FILE_LENGTH]; IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH); fsIn.close(); fsIn=fs.open(TEST_PATH); ByteBuffer result=fsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS)); Assert.assertEquals(4096,result.remaining()); HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn; Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead()); Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead()); Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result)); fsIn.releaseBuffer(result); } finally { if (fsIn != null) fsIn.close(); if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Exercises zero-copy reads around the 2 GB (Integer.MAX_VALUE) mmap
 * boundary. Uses two files: one whose single block spans the boundary
 * (reads near 2^31-1 are truncated and a further read is rejected with
 * UnsupportedOperationException), and one with 256 MB blocks where reads
 * continue cleanly past 2 GB. Skipped unless large-file testing is enabled.
 */
@Test public void test2GBMmapLimit() throws Exception {
  Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
  HdfsConfiguration conf=initZeroCopyTest();
  final long TEST_FILE_LENGTH=2469605888L;
  // Disable checksums so the whole file fits in a single block replica.
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,"NULL");
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,TEST_FILE_LENGTH);
  MiniDFSCluster cluster=null;
  final Path TEST_PATH=new Path("/a");
  final String CONTEXT="test2GBMmapLimit";
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,CONTEXT);
  FSDataInputStream fsIn=null, fsIn2=null;
  ByteBuffer buf1=null, buf2=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs=cluster.getFileSystem();
    DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,0xB);
    DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
    fsIn=fs.open(TEST_PATH);
    // Sanity read of one byte at the start of the file.
    buf1=fsIn.read(null,1,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1,buf1.remaining());
    fsIn.releaseBuffer(buf1);
    buf1=null;
    // Seek to 7 bytes below 2^31-1: the read is truncated at the limit.
    fsIn.seek(2147483640L);
    buf1=fsIn.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(7,buf1.remaining());
    Assert.assertEquals(Integer.MAX_VALUE,buf1.limit());
    fsIn.releaseBuffer(buf1);
    buf1=null;
    Assert.assertEquals(2147483647L,fsIn.getPos());
    // A further zero-copy read in the same block cannot be mmapped.
    try {
      buf1=fsIn.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected UnsupportedOperationException");
    } catch ( UnsupportedOperationException e) {
    }
    fsIn.close();
    fsIn=null;
    // Second file with 256 MB blocks: crossing 2 GB just moves to the
    // next block, so reads keep succeeding.
    final Path TEST_PATH2=new Path("/b");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,268435456L);
    DFSTestUtil.createFile(fs,TEST_PATH2,1024 * 1024,TEST_FILE_LENGTH,268435456L,(short)1,0xA);
    fsIn2=fs.open(TEST_PATH2);
    fsIn2.seek(2147483640L);
    buf2=fsIn2.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(8,buf2.remaining());
    Assert.assertEquals(2147483648L,fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2=null;
    buf2=fsIn2.read(null,1024,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1024,buf2.remaining());
    Assert.assertEquals(2147484672L,fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2=null;
  } finally {
    // Release any buffer still held before closing its stream.
    if (buf1 != null) { fsIn.releaseBuffer(buf1); }
    if (buf2 != null) { fsIn2.releaseBuffer(buf2); }
    IOUtils.cleanup(null,fsIn,fsIn2);
    if (cluster != null) { cluster.shutdown(); }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Like testZeroCopyReads, but verifies the no-fallback contract: a
 * zero-copy read WITHOUT SKIP_CHECKSUMS for a length (4097) that cannot be
 * served zero-copy must throw UnsupportedOperationException rather than
 * silently falling back, while a 4096-byte read with SKIP_CHECKSUMS
 * succeeds and matches the normally-read contents.
 */
@Test public void testZeroCopyReadsNoFallback() throws Exception {
  HdfsConfiguration conf=initZeroCopyTest();
  MiniDFSCluster cluster=null;
  final Path TEST_PATH=new Path("/a");
  FSDataInputStream fsIn=null;
  final int TEST_FILE_LENGTH=12345;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs=cluster.getFileSystem();
    // Fixed seed 7567L makes the generated file contents deterministic.
    DFSTestUtil.createFile(fs,TEST_PATH,TEST_FILE_LENGTH,(short)1,7567L);
    try {
      DFSTestUtil.waitReplication(fs,TEST_PATH,(short)1);
    } catch ( InterruptedException e) {
      Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
    } catch ( TimeoutException e) {
      Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
    }
    // Capture expected bytes via an ordinary read.
    fsIn=fs.open(TEST_PATH);
    byte original[]=new byte[TEST_FILE_LENGTH];
    IOUtils.readFully(fsIn,original,0,TEST_FILE_LENGTH);
    fsIn.close();
    fsIn=fs.open(TEST_PATH);
    HdfsDataInputStream dfsIn=(HdfsDataInputStream)fsIn;
    ByteBuffer result;
    // 4097 bytes with no ReadOptions: must refuse rather than fall back.
    try {
      result=dfsIn.read(null,4097,EnumSet.noneOf(ReadOption.class));
      Assert.fail("expected UnsupportedOperationException");
    } catch ( UnsupportedOperationException e) {
    }
    // 4096 bytes with SKIP_CHECKSUMS succeeds zero-copy.
    result=dfsIn.read(null,4096,EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(4096,result.remaining());
    Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(4096,dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
    Assert.assertArrayEquals(Arrays.copyOfRange(original,0,4096),byteBufferToArray(result));
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}

Class: org.apache.hadoop.fs.TestFSMainOperationsLocalFileSystem

EqualityVerifier 
/**
 * Setting an absolute path as the working directory must be reported back
 * unchanged by the file system.
 */
@Test @Override
public void testWDAbsolute() throws IOException {
  final Path existingDir = getTestRootPath(fSys, "test/existingDir");
  fSys.mkdirs(existingDir);
  fSys.setWorkingDirectory(existingDir);
  Assert.assertEquals(existingDir, fSys.getWorkingDirectory());
}

Class: org.apache.hadoop.fs.TestFiListPath

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Remove the target directory after the getListing RPC: listStatus on the
 * now-missing path must surface FileNotFoundException with the expected
 * message.
 */
@Test public void testTargetDeletionForListStatus() throws Exception {
  LOG.info("Test Target Delete For listStatus");
  try {
    fs.listStatus(TEST_PATH);
    fail("Test should fail with FileNotFoundException");
  } catch (FileNotFoundException fnfe) {
    final String expectedMessage = "File " + TEST_PATH + " does not exist.";
    assertEquals(expectedMessage, fnfe.getMessage());
    LOG.info(StringUtils.stringifyException(fnfe));
  }
}

Class: org.apache.hadoop.fs.TestFiRename

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rename test where both src and dst are files. An OVERWRITE rename should
 * delete the destination file (one fewer block and one fewer file), and
 * that state must survive a cluster restart. The polling loop gives the
 * restarted namenode up to ~5s to come back before the final checks.
 */
@Test public void testDeletionOfDstFile() throws Exception {
  Path src=getTestPath("testDeletionOfDstFile/dir/src");
  Path dst=getTestPath("testDeletionOfDstFile/newdir/dst");
  createFile(src);
  createFile(dst);
  final FSNamesystem namesystem=cluster.getNamesystem();
  final long blocks=namesystem.getBlocksTotal();
  final long fileCount=namesystem.getFilesTotal();
  rename(src,dst,false,false,true,Rename.OVERWRITE);
  // Overwriting dst removes its block and inode.
  Assert.assertEquals(blocks - 1,namesystem.getBlocksTotal());
  Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal());
  restartCluster(false);
  int count=0;
  boolean exception=true;
  src=getTestPath("testDeletionOfDstFile/dir/src");
  dst=getTestPath("testDeletionOfDstFile/newdir/dst");
  // Retry exists() until the restarted namenode answers (max 5 attempts).
  while (exception && count < 5) {
    try {
      exists(fc,src);
      exception=false;
    } catch ( Exception e) {
      LOG.warn("Exception " + " count " + count + " "+ e.getMessage());
      Thread.sleep(1000);
      count++;
    }
  }
  // After restart: src is gone, dst (the renamed file) remains.
  Assert.assertFalse(exists(fc,src));
  Assert.assertTrue(exists(fc,dst));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rename test where both src and dst are directories. An OVERWRITE rename
 * should delete the destination directory (one fewer file in the
 * namesystem), and the result must survive a cluster restart. The polling
 * loop gives the restarted namenode up to ~5s to come back.
 */
@Test public void testDeletionOfDstDirectory() throws Exception {
  Path src=getTestPath("testDeletionOfDstDirectory/dir/src");
  Path dst=getTestPath("testDeletionOfDstDirectory/newdir/dst");
  fc.mkdir(src,FileContext.DEFAULT_PERM,true);
  fc.mkdir(dst,FileContext.DEFAULT_PERM,true);
  FSNamesystem namesystem=cluster.getNamesystem();
  long fileCount=namesystem.getFilesTotal();
  rename(src,dst,false,false,true,Rename.OVERWRITE);
  // Overwriting dst removes its inode.
  Assert.assertEquals(fileCount - 1,namesystem.getFilesTotal());
  restartCluster(false);
  src=getTestPath("testDeletionOfDstDirectory/dir/src");
  dst=getTestPath("testDeletionOfDstDirectory/newdir/dst");
  int count=0;
  boolean exception=true;
  // Retry exists() until the restarted namenode answers (max 5 attempts).
  while (exception && count < 5) {
    try {
      exists(fc,src);
      exception=false;
    } catch ( Exception e) {
      LOG.warn("Exception " + " count " + count + " "+ e.getMessage());
      Thread.sleep(1000);
      count++;
    }
  }
  // After restart: src is gone, dst (the renamed directory) remains.
  Assert.assertFalse(exists(fc,src));
  Assert.assertTrue(exists(fc,dst));
}

Class: org.apache.hadoop.fs.TestFileContextResolveAfs

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Resolving a symlink through FileContext should yield exactly one
 * AbstractFileSystem for a link whose target lives on the local FS.
 *
 * Fix: the output stream returned by {@code localFs.create(localPath)} was
 * previously leaked; an open handle can keep the file from being deleted
 * (notably on Windows) and leaks a descriptor either way. It is now closed
 * immediately — the test only needs the file to exist.
 */
@Test(timeout=30000) public void testFileContextResolveAfs() throws IOException {
  Configuration conf=new Configuration();
  localFs=FileSystem.get(conf);
  Path localPath=new Path(TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs1");
  Path linkPath=localFs.makeQualified(new Path(TEST_ROOT_DIR_LOCAL,"TestFileContextResolveAfs2"));
  localFs.mkdirs(new Path(TEST_ROOT_DIR_LOCAL));
  // Close the stream right away: we only need the file created.
  localFs.create(localPath).close();
  fc.createSymlink(localPath,linkPath,true);
  Set<?> afsList=fc.resolveAbstractFileSystems(linkPath);
  Assert.assertEquals(1,afsList.size());
  localFs.deleteOnExit(localPath);
  localFs.deleteOnExit(linkPath);
  localFs.close();
}

Class: org.apache.hadoop.fs.TestFileStatus

InternalCallVerifier EqualityVerifier 
/**
 * Check that the write and readFields methods round-trip: serialize several
 * FileStatus instances to a byte stream, read them back, and compare each
 * deserialized status to the original.
 */
@Test public void testFileStatusWritable() throws Exception {
  final FileStatus[] statuses = {
      new FileStatus(1, false, 5, 3, 4, 5, null, "", "", new Path("/a/b")),
      new FileStatus(0, false, 1, 2, 3, new Path("/")),
      new FileStatus(1, false, 5, 3, 4, 5, null, "", "", new Path("/a/b"))};
  LOG.info("Writing FileStatuses to a ByteArrayOutputStream");
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  final DataOutput out = new DataOutputStream(buffer);
  for (FileStatus status : statuses) {
    status.write(out);
  }
  LOG.info("Creating ByteArrayInputStream object");
  final DataInput in =
      new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
  LOG.info("Testing if read objects are equal to written ones");
  final FileStatus deserialized = new FileStatus();
  for (int i = 0; i < statuses.length; i++) {
    deserialized.readFields(in);
    assertEquals("Different FileStatuses in iteration " + i, deserialized, statuses[i]);
  }
}

EqualityVerifier 
/**
 * Check that FileStatus equality is determined by path alone: two statuses
 * differing in every other attribute but sharing a path compare equal.
 */
@Test public void testEquals(){
  final Path sharedPath = new Path("path");
  final FileStatus first = new FileStatus(1, true, 1, 1, 1, 1,
      FsPermission.valueOf("-rw-rw-rw-"), "one", "one", null, sharedPath);
  final FileStatus second = new FileStatus(2, true, 2, 2, 2, 2,
      FsPermission.valueOf("---x--x--x"), "two", "two", null, sharedPath);
  assertEquals(first, second);
}

Class: org.apache.hadoop.fs.TestFileSystemCaching

APIUtilityVerifier UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies how partial URIs resolve against the configured default file
 * system: missing scheme/authority variants map to the cached default
 * instance, a different authority yields a new instance, and an authority
 * without a scheme is rejected.
 */
@Test public void testDefaultFsUris() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.defaultfs.impl", DefaultFs.class.getName());
  final URI defaultUri = URI.create("defaultfs://host");
  FileSystem.setDefaultUri(conf, defaultUri);
  final FileSystem defaultFs = FileSystem.get(conf);
  assertEquals(defaultUri, defaultFs.getUri());
  // URIs omitting the authority, or repeating the default authority,
  // resolve to the same cached default instance.
  FileSystem resolved = FileSystem.get(URI.create("defaultfs:/"), conf);
  assertSame(defaultFs, resolved);
  resolved = FileSystem.get(URI.create("defaultfs:///"), conf);
  assertSame(defaultFs, resolved);
  resolved = FileSystem.get(URI.create("defaultfs://host"), conf);
  assertSame(defaultFs, resolved);
  // A different authority produces a distinct instance.
  resolved = FileSystem.get(URI.create("defaultfs://host2"), conf);
  assertNotSame(defaultFs, resolved);
  // A bare path falls through to the default file system.
  resolved = FileSystem.get(URI.create("/"), conf);
  assertSame(defaultFs, resolved);
  // Authority without a scheme must be rejected.
  try {
    resolved = FileSystem.get(URI.create("//host"), conf);
    fail("got fs with auth but no scheme");
  } catch (Exception e) {
    assertEquals("No FileSystem for scheme: null", e.getMessage());
  }
  try {
    resolved = FileSystem.get(URI.create("//host2"), conf);
    fail("got fs with auth but no scheme");
  } catch (Exception e) {
    assertEquals("No FileSystem for scheme: null", e.getMessage());
  }
}

Class: org.apache.hadoop.fs.TestFileSystemTokens

InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * One child's token is already present in the credentials: only the other
 * named child should fetch a new token; the pre-existing one is reused.
 */
@Test public void testFsWithChildTokensOneExists() throws Exception {
  final Credentials creds = new Credentials();
  final Text service1 = new Text("singleTokenFs1");
  final Text service2 = new Text("singleTokenFs2");
  final Token mockToken = mock(Token.class);
  creds.addToken(service2, mockToken);
  final MockFileSystem fs1 = createFileSystemForServiceName(service1);
  final MockFileSystem fs2 = createFileSystemForServiceName(service2);
  final MockFileSystem fs3 = createFileSystemForServiceName(null);
  final MockFileSystem multiFs = createFileSystemForServiceName(null, fs1, fs2, fs3);
  multiFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs1, true);   // only fs1 needs a fresh token
  verifyTokenFetch(fs2, false);  // token already cached for fs2
  verifyTokenFetch(fs3, false);  // no service, nothing to fetch
  assertEquals(2, creds.numberOfTokens());
  assertNotNull(creds.getToken(service1));
  assertSame(mockToken, creds.getToken(service2));
}

InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The parent filesystem's own token pre-exists, so the parent fetches
 * nothing while both named children fetch fresh tokens.
 */
@Test public void testFsWithMyOwnExistsAndChildTokens() throws Exception {
  final Credentials creds = new Credentials();
  final Text service1 = new Text("singleTokenFs1");
  final Text service2 = new Text("singleTokenFs2");
  final Text myService = new Text("multiTokenFs");
  final Token mockToken = mock(Token.class);
  creds.addToken(myService, mockToken);
  final MockFileSystem fs1 = createFileSystemForServiceName(service1);
  final MockFileSystem fs2 = createFileSystemForServiceName(service2);
  final MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
  multiFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(multiFs, false);  // parent's token already cached
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, true);
  assertEquals(3, creds.numberOfTokens());
  assertSame(mockToken, creds.getToken(myService));
  assertNotNull(creds.getToken(service1));
  assertNotNull(creds.getToken(service2));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A filesystem with its own service name and empty credentials fetches
 * exactly one token for that service.
 */
@Test public void testFsWithToken() throws Exception {
  final Text service = new Text("singleTokenFs");
  final MockFileSystem fs = createFileSystemForServiceName(service);
  final Credentials creds = new Credentials();
  fs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(fs, true);
  assertEquals(1, creds.numberOfTokens());
  assertNotNull(creds.getToken(service));
}

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * When a token for the service is already in the credentials, no fetch
 * occurs and the pre-existing token object is left untouched.
 */
@Test public void testFsWithTokenExists() throws Exception {
  final Credentials creds = new Credentials();
  final Text service = new Text("singleTokenFs");
  final MockFileSystem fs = createFileSystemForServiceName(service);
  final Token mockToken = mock(Token.class);
  creds.addToken(service, mockToken);
  fs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(fs, false);
  assertEquals(1, creds.numberOfTokens());
  assertSame(mockToken, creds.getToken(service));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The same child appears twice (once wrapped in a FilterFileSystem); its
 * token must still be fetched only once.
 */
@Test public void testFsWithDuplicateChildren() throws Exception {
  final Credentials creds = new Credentials();
  final Text service = new Text("singleTokenFs1");
  final MockFileSystem child = createFileSystemForServiceName(service);
  final MockFileSystem multiFs =
      createFileSystemForServiceName(null, child, new FilterFileSystem(child));
  multiFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(child, true);
  assertEquals(1, creds.numberOfTokens());
  assertNotNull(creds.getToken(service));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Children with service names each contribute one token; the nameless
 * child and the nameless parent contribute none.
 */
@Test public void testFsWithChildTokens() throws Exception {
  final Credentials creds = new Credentials();
  final Text service1 = new Text("singleTokenFs1");
  final Text service2 = new Text("singleTokenFs2");
  final MockFileSystem fs1 = createFileSystemForServiceName(service1);
  final MockFileSystem fs2 = createFileSystemForServiceName(service2);
  final MockFileSystem fs3 = createFileSystemForServiceName(null);
  final MockFileSystem multiFs = createFileSystemForServiceName(null, fs1, fs2, fs3);
  multiFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, true);
  verifyTokenFetch(fs3, false);
  assertEquals(2, creds.numberOfTokens());
  assertNotNull(creds.getToken(service1));
  assertNotNull(creds.getToken(service2));
}

InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * The duplicated child's token is already in the credentials, so neither
 * the parent nor the child fetches anything new.
 */
@Test public void testFsWithDuplicateChildrenTokenExists() throws Exception {
  final Credentials creds = new Credentials();
  final Text service = new Text("singleTokenFs1");
  final Token mockToken = mock(Token.class);
  creds.addToken(service, mockToken);
  final MockFileSystem child = createFileSystemForServiceName(service);
  final MockFileSystem multiFs =
      createFileSystemForServiceName(null, child, new FilterFileSystem(child));
  multiFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(child, false);
  assertEquals(1, creds.numberOfTokens());
  assertSame(mockToken, creds.getToken(service));
}

EqualityVerifier 
/**
 * A filesystem with no service name fetches no tokens at all.
 */
@Test public void testFsWithNoToken() throws Exception {
  final MockFileSystem fs = createFileSystemForServiceName(null);
  final Credentials creds = new Credentials();
  fs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(fs, false);
  assertEquals(0, creds.numberOfTokens());
}

InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Deeply nested tree with duplicated children (directly, via repeated
 * references, and behind one or two FilterFileSystem wrappers). Each
 * distinct service must be fetched at most once, an already-cached token
 * (service2) must not be re-fetched, and duplicates of the same service
 * (fs1 vs fs1B) collapse to a single fetch.
 */
@Test public void testFsWithNestedDuplicatesChildren() throws Exception {
  Credentials credentials=new Credentials();
  Text service1=new Text("singleTokenFs1");
  Text service2=new Text("singleTokenFs2");
  Text service4=new Text("singleTokenFs4");
  Text multiService=new Text("multiTokenFs");
  // Pre-populate service2's token so it must not be fetched again.
  Token token2=mock(Token.class);
  credentials.addToken(service2,token2);
  MockFileSystem fs1=createFileSystemForServiceName(service1);
  MockFileSystem fs1B=createFileSystemForServiceName(service1);
  MockFileSystem fs2=createFileSystemForServiceName(service2);
  MockFileSystem fs3=createFileSystemForServiceName(null);
  MockFileSystem fs4=createFileSystemForServiceName(service4);
  // multiFs repeats fs2 and nests fs3/fs4 behind filter wrappers.
  MockFileSystem multiFs=createFileSystemForServiceName(multiService,fs1,fs1B,fs2,fs2,new FilterFileSystem(fs3),new FilterFileSystem(new FilterFileSystem(fs4)));
  // superMultiFs repeats fs1 three times and nests multiFs itself.
  MockFileSystem superMultiFs=createFileSystemForServiceName(null,fs1,fs1B,fs1,new FilterFileSystem(fs3),new FilterFileSystem(multiFs));
  superMultiFs.addDelegationTokens(renewer,credentials);
  verifyTokenFetch(superMultiFs,false); // has no service of its own
  verifyTokenFetch(multiFs,true);       // its own multiService token
  verifyTokenFetch(fs1,true);           // fetched once despite duplicates
  verifyTokenFetch(fs2,false);          // token pre-existed
  verifyTokenFetch(fs3,false);          // no service
  verifyTokenFetch(fs4,true);
  // service1, service2 (pre-existing), multiService, service4.
  assertEquals(4,credentials.numberOfTokens());
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
  assertSame(token2,credentials.getToken(service2));
  assertNotNull(credentials.getToken(multiService));
  assertNotNull(credentials.getToken(service4));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * fs2's token pre-exists; the parent (own service) and fs1 fetch new
 * tokens while fs2 reuses the cached one.
 */
@Test public void testFsWithMyOwnAndChildTokens() throws Exception {
  final Credentials creds = new Credentials();
  final Text service1 = new Text("singleTokenFs1");
  final Text service2 = new Text("singleTokenFs2");
  final Text myService = new Text("multiTokenFs");
  final Token mockToken = mock(Token.class);
  creds.addToken(service2, mockToken);
  final MockFileSystem fs1 = createFileSystemForServiceName(service1);
  final MockFileSystem fs2 = createFileSystemForServiceName(service2);
  final MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
  multiFs.addDelegationTokens(renewer, creds);
  verifyTokenFetch(multiFs, true);
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, false);
  assertEquals(3, creds.numberOfTokens());
  assertNotNull(creds.getToken(myService));
  assertNotNull(creds.getToken(service1));
  assertNotNull(creds.getToken(service2));
}

Class: org.apache.hadoop.fs.TestFileUtil

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.list: returns the entries of a populated directory, an empty
 * array for an empty directory, and throws IOException for a missing one.
 */
@Test(timeout=30000) public void testListAPI() throws IOException {
  setupDirs();
  String[] entries = FileUtil.list(partitioned);
  Assert.assertEquals("Unexpected number of pre-existing files", 2, entries.length);
  final File newDir = new File(tmp.getPath(), "test");
  newDir.mkdir();
  Assert.assertTrue("Failed to create test dir", newDir.exists());
  entries = FileUtil.list(newDir);
  Assert.assertEquals("New directory unexpectedly contains files", 0, entries.length);
  newDir.delete();
  Assert.assertFalse("Failed to delete test dir", newDir.exists());
  try {
    entries = FileUtil.list(newDir);
    Assert.fail("IOException expected on list() for non-existent dir " + newDir.toString());
  } catch (IOException expected) {
    // listing a non-existent directory must fail
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests if fullyDelete deletes
 * (a) symlink to file only and not the file pointed to by symlink.
 * (b) symlink to dir only and not the dir pointed to by symlink.
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteSymlinks() throws IOException {
  setupDirs();
  final File fileLink = new File(del, LINK);
  Assert.assertEquals(5, del.list().length);
  // Deleting the file symlink removes only the link itself.
  boolean deleted = FileUtil.fullyDelete(fileLink);
  Assert.assertTrue(deleted);
  Assert.assertFalse(fileLink.exists());
  Assert.assertEquals(4, del.list().length);
  validateTmpDir();
  // Deleting the directory symlink likewise leaves its target intact.
  final File dirLink = new File(del, "tmpDir");
  deleted = FileUtil.fullyDelete(dirLink);
  Assert.assertTrue(deleted);
  Assert.assertFalse(dirLink.exists());
  Assert.assertEquals(3, del.list().length);
  validateTmpDir();
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.unZip: extracts a one-entry archive correctly, and fails with
 * IOException when the destination is a regular file.
 *
 * Fix: the failure branch used {@code assertTrue("...", false)}; replaced
 * with the intended {@code Assert.fail(...)} idiom.
 */
@Test(timeout=30000) public void testUnZip() throws IOException {
  setupDirs();
  // Build a zip containing a single entry "foo" -> "some-content".
  final File simpleZip=new File(del,FILE);
  OutputStream os=new FileOutputStream(simpleZip);
  ZipOutputStream tos=new ZipOutputStream(os);
  try {
    ZipEntry ze=new ZipEntry("foo");
    byte[] data="some-content".getBytes("UTF-8");
    ze.setSize(data.length);
    tos.putNextEntry(ze);
    tos.write(data);
    tos.closeEntry();
    tos.flush();
    tos.finish();
  } finally {
    tos.close(); // also closes the underlying FileOutputStream
  }
  FileUtil.unZip(simpleZip,tmp);
  assertTrue(new File(tmp,"foo").exists());
  assertEquals(12,new File(tmp,"foo").length()); // "some-content" is 12 bytes
  // Unzipping onto a regular file (not a directory) must fail.
  final File regularFile=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
  regularFile.createNewFile();
  assertTrue(regularFile.exists());
  try {
    FileUtil.unZip(simpleZip,regularFile);
    Assert.fail("An IOException expected.");
  } catch ( IOException ioe) {
  }
}

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * copyMerge of the "partitioned" directory concatenates its two part files
 * into tmp/merged containing exactly the lines "foo" then "bar".
 */
@Test(timeout=30000) public void testCopyMergeSingleDirectory() throws IOException {
  setupDirs();
  final boolean mergeSucceeded = copyMerge("partitioned", "tmp/merged");
  Assert.assertTrue("Expected successful copyMerge result.", mergeSucceeded);
  final File mergedFile = new File(TEST_DIR, "tmp/merged");
  Assert.assertTrue("File tmp/merged must exist after copyMerge.", mergedFile.exists());
  final BufferedReader reader = new BufferedReader(new FileReader(mergedFile));
  try {
    Assert.assertEquals("Line 1 of merged file must contain \"foo\".", "foo", reader.readLine());
    Assert.assertEquals("Line 2 of merged file must contain \"bar\".", "bar", reader.readLine());
    Assert.assertNull("Expected end of file reading merged file.", reader.readLine());
  } finally {
    reader.close();
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.stat2Paths(statuses, defaultPath): null statuses yield a
 * one-element array holding the default path (even a null default); a real
 * array is converted element-wise, order preserved.
 *
 * Fix: {@code assertEquals(null, paths[0])} replaced with the clearer
 * {@code assertNull(paths[0])} idiom (already used elsewhere in this class).
 */
@Test(timeout=30000) public void testStat2Paths2(){
  // Null status array with a default path -> just the default.
  Path defaultPath=new Path("file://default");
  Path[] paths=FileUtil.stat2Paths(null,defaultPath);
  assertEquals(1,paths.length);
  assertEquals(defaultPath,paths[0]);
  // Null statuses and null default -> single-element array holding null.
  paths=FileUtil.stat2Paths(null,null);
  assertTrue(paths != null);
  assertEquals(1,paths.length);
  assertNull(paths[0]);
  // Real statuses are converted to their paths, order preserved.
  Path path1=new Path("file://foo");
  Path path2=new Path("file://moo");
  FileStatus[] fileStatuses=new FileStatus[]{new FileStatus(3,false,0,0,0,path1),new FileStatus(3,false,0,0,0,path2)};
  paths=FileUtil.stat2Paths(fileStatuses,defaultPath);
  assertEquals(2,paths.length);
  assertEquals(paths[0],path1);
  assertEquals(paths[1],path2);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A symlink created by FileUtil.symLink reports the target's length and
 * yields the target's bytes when read through the link.
 *
 * Fix: the byte-counting loop used {@code while (in.read() > 0)}, which
 * stops at the first zero byte instead of at end-of-stream ({@code read()}
 * returns -1 at EOF and 0-255 otherwise). It happened to pass because
 * "testSymLink" contains no zero bytes; the condition is now {@code != -1}.
 * The stream is also closed in a finally block so a failed assertion
 * cannot leak it.
 */
@Test(timeout=30000) public void testSymlink() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();
  byte[] data="testSymLink".getBytes();
  File file=new File(del,FILE);
  File link=new File(del,"_link");
  // Write the target file.
  FileOutputStream os=new FileOutputStream(file);
  os.write(data);
  os.close();
  FileUtil.symLink(file.getAbsolutePath(),link.getAbsolutePath());
  Assert.assertEquals(data.length,file.length());
  Assert.assertEquals(data.length,link.length());
  // Count the bytes readable through the link.
  FileInputStream in=new FileInputStream(link);
  long len=0;
  try {
    while (in.read() != -1) {
      len++;
    }
  } finally {
    in.close();
  }
  Assert.assertEquals(data.length,len);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * fullyDeleteContents empties the directory but leaves the directory
 * itself (and the symlink targets under tmp) in place.
 */
@Test(timeout=30000) public void testFullyDeleteContents() throws IOException {
  setupDirs();
  final boolean deleted = FileUtil.fullyDeleteContents(del);
  Assert.assertTrue(deleted);
  Assert.assertTrue(del.exists());
  Assert.assertEquals(0, del.listFiles().length);
  validateTmpDir();
}

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.stat2Paths(statuses): null maps to null, an empty array maps to
 * an empty array, and each status is converted to its path in order.
 */
@Test(timeout=30000) public void testStat2Paths1(){
  assertNull(FileUtil.stat2Paths(null));
  Path[] converted = FileUtil.stat2Paths(new FileStatus[0]);
  assertEquals(0, converted.length);
  final Path pathA = new Path("file://foo");
  final Path pathB = new Path("file://moo");
  final FileStatus[] statuses = new FileStatus[]{
      new FileStatus(3, false, 0, 0, 0, pathA),
      new FileStatus(3, false, 0, 0, 0, pathB)};
  converted = FileUtil.stat2Paths(statuses);
  assertEquals(2, converted.length);
  assertEquals(converted[0], pathA);
  assertEquals(converted[1], pathB);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.copy from a (Raw)LocalFileSystem path to a local File: plain
 * copy keeps the source, delSrc removes it, and copying a directory with
 * delSrc moves the whole tree.
 *
 * Fix: {@code FileSystem.newInstance} returns an uncached instance the
 * caller owns; it was never closed (resource leak). It is now closed in a
 * finally block.
 */
@Test(timeout=30000) public void testCopy5() throws IOException {
  setupDirs();
  URI uri=tmp.toURI();
  Configuration conf=new Configuration();
  FileSystem fs=FileSystem.newInstance(uri,conf);
  try {
    final String content="some-content";
    File srcFile=createFile(tmp,"src",content);
    Path srcPath=new Path(srcFile.toURI());
    final File dest=new File(del,"dest");
    // Plain copy: destination appears, source remains.
    boolean result=FileUtil.copy(fs,srcPath,dest,false,conf);
    assertTrue(result);
    assertTrue(dest.exists());
    assertEquals(content.getBytes().length + System.getProperty("line.separator").getBytes().length,dest.length());
    assertTrue(srcFile.exists());
    dest.delete();
    assertTrue(!dest.exists());
    // Copy with delSrc: source is removed.
    result=FileUtil.copy(fs,srcPath,dest,true,conf);
    assertTrue(result);
    assertTrue(dest.exists());
    assertEquals(content.getBytes().length + System.getProperty("line.separator").getBytes().length,dest.length());
    assertTrue(!srcFile.exists());
    dest.delete();
    assertTrue(!dest.exists());
    // Directory copy with delSrc: whole tree is moved.
    srcPath=new Path(partitioned.toURI());
    result=FileUtil.copy(fs,srcPath,dest,true,conf);
    assertTrue(result);
    assertTrue(dest.exists() && dest.isDirectory());
    File[] files=dest.listFiles();
    assertTrue(files != null);
    assertEquals(2,files.length);
    for ( File f : files) {
      assertEquals(3 + System.getProperty("line.separator").getBytes().length,f.length());
    }
    assertTrue(!partitioned.exists());
  } finally {
    fs.close();
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests if fullyDelete deletes
 * (a) dangling symlink to file properly
 * (b) dangling symlink to directory properly
 * @throws IOException
 */
@Test(timeout=30000) public void testFullyDeleteDanglingSymlinks() throws IOException {
  setupDirs();
  // Remove the link targets first so both symlinks dangle.
  boolean deleted = FileUtil.fullyDelete(tmp);
  Assert.assertTrue(deleted);
  Assert.assertFalse(tmp.exists());
  // Dangling file symlink.
  final File fileLink = new File(del, LINK);
  Assert.assertEquals(5, del.list().length);
  deleted = FileUtil.fullyDelete(fileLink);
  Assert.assertTrue(deleted);
  Assert.assertEquals(4, del.list().length);
  // Dangling directory symlink.
  final File dirLink = new File(del, "tmpDir");
  deleted = FileUtil.fullyDelete(dirLink);
  Assert.assertTrue(deleted);
  Assert.assertEquals(3, del.list().length);
}

BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that length on a symlink works as expected: 0 before the link
 * exists, the target's length while it exists, and 0 again once the
 * target is deleted (except on pre-Java7 Windows).
 */
@Test(timeout=30000) public void testSymlinkLength() throws Exception {
  Assert.assertFalse(del.exists());
  del.mkdirs();
  final byte[] contents = "testSymLinkData".getBytes();
  final File target = new File(del, FILE);
  final File link = new File(del, "_link");
  final FileOutputStream out = new FileOutputStream(target);
  out.write(contents);
  out.close();
  // Before the link is created its length reads as 0.
  Assert.assertEquals(0, link.length());
  FileUtil.symLink(target.getAbsolutePath(), link.getAbsolutePath());
  Assert.assertEquals(contents.length, target.length());
  Assert.assertEquals(contents.length, link.length());
  target.delete();
  Assert.assertFalse(target.exists());
  if (Shell.WINDOWS && !Shell.isJava7OrAbove()) {
    // NOTE(review): on pre-Java7 Windows the "link" apparently still holds
    // the data length — presumably symLink falls back to a copy there;
    // confirm against FileUtil.symLink.
    Assert.assertEquals(contents.length, link.length());
  } else {
    Assert.assertEquals(0, link.length());
  }
  link.delete();
  Assert.assertFalse(link.exists());
}

APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test that getDU is able to handle cycles caused due to symbolic links
 * and that directory sizes are not added to the final calculated size.
 * Also checks behavior for missing paths, plain files, and unreadable
 * files/directories (an unreadable child is still counted via the parent;
 * an unreadable directory counts as 0).
 * @throws IOException
 */
@Test(timeout=30000) public void testGetDU() throws Exception {
  setupDirs();
  long du=FileUtil.getDU(TEST_DIR);
  // Two 3-byte data files, each followed by a platform line separator;
  // directory entries themselves contribute nothing.
  final long expected=2 * (3 + System.getProperty("line.separator").length());
  Assert.assertEquals(expected,du);
  // A non-existent path has zero usage.
  final File doesNotExist=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
  long duDoesNotExist=FileUtil.getDU(doesNotExist);
  assertEquals(0,duDoesNotExist);
  // A plain file reports its own length.
  File notADirectory=new File(partitioned,"part-r-00000");
  long duNotADirectoryActual=FileUtil.getDU(notADirectory);
  long duNotADirectoryExpected=3 + System.getProperty("line.separator").length();
  assertEquals(duNotADirectoryExpected,duNotADirectoryActual);
  try {
    // Make one child unreadable; chmod may be interrupted, which would
    // be a test failure (assertNull(ie) fails on a non-null exception).
    try {
      FileUtil.chmod(notADirectory.getAbsolutePath(),"0000");
    } catch ( InterruptedException ie) {
      assertNull(ie);
    }
    assertFalse(FileUtil.canRead(notADirectory));
    // An unreadable child is still sized through its parent directory.
    final long du3=FileUtil.getDU(partitioned);
    assertEquals(expected,du3);
    // Make the directory itself unreadable: its usage becomes 0.
    try {
      FileUtil.chmod(partitioned.getAbsolutePath(),"0000");
    } catch ( InterruptedException ie) {
      assertNull(ie);
    }
    assertFalse(FileUtil.canRead(partitioned));
    final long du4=FileUtil.getDU(partitioned);
    assertEquals(0,du4);
  } finally {
    // Restore permissions recursively so later tests can clean up.
    FileUtil.chmod(partitioned.getAbsolutePath(),"0777",true);
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.listFiles: returns the files of a populated directory, an empty
 * array for an empty one, and throws IOException for a missing one.
 */
@Test(timeout=30000) public void testListFiles() throws IOException {
  setupDirs();
  File[] listed = FileUtil.listFiles(partitioned);
  Assert.assertEquals(2, listed.length);
  final File newDir = new File(tmp.getPath(), "test");
  newDir.mkdir();
  Assert.assertTrue("Failed to create test dir", newDir.exists());
  listed = FileUtil.listFiles(newDir);
  Assert.assertEquals(0, listed.length);
  newDir.delete();
  Assert.assertFalse("Failed to delete test dir", newDir.exists());
  try {
    listed = FileUtil.listFiles(newDir);
    Assert.fail("IOException expected on listFiles() for non-existent dir " + newDir.toString());
  } catch (IOException expected) {
    // listing a non-existent directory must fail
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileUtil.unTar: extracts a one-entry archive correctly, and fails with
 * IOException when the destination is a regular file.
 *
 * Fix: the failure branch used {@code assertTrue("...", false)}; replaced
 * with the intended {@code Assert.fail(...)} idiom.
 */
@Test(timeout=30000) public void testUnTar() throws IOException {
  setupDirs();
  // Build a tar containing a single entry "foo" -> "some-content".
  final File simpleTar=new File(del,FILE);
  OutputStream os=new FileOutputStream(simpleTar);
  TarOutputStream tos=new TarOutputStream(os);
  try {
    TarEntry te=new TarEntry("foo");
    byte[] data="some-content".getBytes("UTF-8");
    te.setSize(data.length);
    tos.putNextEntry(te);
    tos.write(data);
    tos.closeEntry();
    tos.flush();
    tos.finish();
  } finally {
    tos.close(); // also closes the underlying FileOutputStream
  }
  FileUtil.unTar(simpleTar,tmp);
  assertTrue(new File(tmp,"foo").exists());
  assertEquals(12,new File(tmp,"foo").length()); // "some-content" is 12 bytes
  // Untarring onto a regular file (not a directory) must fail.
  final File regularFile=new File(tmp,"QuickBrownFoxJumpsOverTheLazyDog");
  regularFile.createNewFile();
  assertTrue(regularFile.exists());
  try {
    FileUtil.unTar(simpleTar,regularFile);
    Assert.fail("An IOException expected.");
  } catch ( IOException ioe) {
  }
}

APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Verifies FileUtil.createJarWithClassPath(): the generated manifest's Class-Path must
// contain each non-empty input entry as a file URL, with wildcard entries expanded to the
// matching *.jar/*.JAR files and non-existent subdirs kept (with trailing separator).
// NOTE(review): comparison is order-insensitive (both lists are sorted before comparing).
@Test(timeout=30000) public void testCreateJarWithClassPath() throws Exception { Assert.assertFalse(tmp.exists()); Assert.assertTrue(tmp.mkdirs()); List wildcardMatches=Arrays.asList(new File(tmp,"wildcard1.jar"),new File(tmp,"wildcard2.jar"),new File(tmp,"wildcard3.JAR"),new File(tmp,"wildcard4.JAR")); for ( File wildcardMatch : wildcardMatches) { Assert.assertTrue("failure creating file: " + wildcardMatch,wildcardMatch.createNewFile()); } Assert.assertTrue(new File(tmp,"text.txt").createNewFile()); Assert.assertTrue(new File(tmp,"executable.exe").createNewFile()); Assert.assertTrue(new File(tmp,"README").createNewFile()); String wildcardPath=tmp.getCanonicalPath() + File.separator + "*"; String nonExistentSubdir=tmp.getCanonicalPath() + Path.SEPARATOR + "subdir"+ Path.SEPARATOR; List classPaths=Arrays.asList("","cp1.jar","cp2.jar",wildcardPath,"cp3.jar",nonExistentSubdir); String inputClassPath=StringUtils.join(File.pathSeparator,classPaths); String classPathJar=FileUtil.createJarWithClassPath(inputClassPath,new Path(tmp.getCanonicalPath()),System.getenv()); JarFile jarFile=null; try { jarFile=new JarFile(classPathJar); Manifest jarManifest=jarFile.getManifest(); Assert.assertNotNull(jarManifest); Attributes mainAttributes=jarManifest.getMainAttributes(); Assert.assertNotNull(mainAttributes); Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH)); String classPathAttr=mainAttributes.getValue(Attributes.Name.CLASS_PATH); Assert.assertNotNull(classPathAttr); List expectedClassPaths=new ArrayList(); for ( String classPath : classPaths) { if (classPath.length() == 0) { continue; } if (wildcardPath.equals(classPath)) { for ( File wildcardMatch : wildcardMatches) { expectedClassPaths.add(wildcardMatch.toURI().toURL().toExternalForm()); } } else { File fileCp=null; if (!new Path(classPath).isAbsolute()) { fileCp=new File(tmp,classPath); } else { fileCp=new File(classPath); } if (nonExistentSubdir.equals(classPath)) { 
expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm() + Path.SEPARATOR); } else { expectedClassPaths.add(fileCp.toURI().toURL().toExternalForm()); } } } List actualClassPaths=Arrays.asList(classPathAttr.split(" ")); Collections.sort(expectedClassPaths); Collections.sort(actualClassPaths); Assert.assertEquals(expectedClassPaths,actualClassPaths); } finally { if (jarFile != null) { try { jarFile.close(); } catch ( IOException e) { LOG.warn("exception closing jarFile: " + classPathJar,e); } } } }

Class: org.apache.hadoop.fs.TestFilterFileSystem

EqualityVerifier 
/**
 * Initializing a FilterLocalFileSystem must propagate the Configuration
 * down to the embedded raw LocalFileSystem (3-level filter chain).
 */
@Test public void testInitFilterLocalFsSetsEmbedConf() throws Exception {
  final FilterFileSystem filterFs = new FilterLocalFileSystem();
  assertEquals(LocalFileSystem.class, filterFs.getRawFileSystem().getClass());
  // Before initialize(): no conf anywhere in the chain.
  checkFsConf(filterFs, null, 3);
  filterFs.initialize(URI.create("flfs:/"), conf);
  // After initialize(): conf is visible throughout the chain.
  checkFsConf(filterFs, conf, 3);
}

EqualityVerifier 
/**
 * Wrapping a LocalFileSystem in a FilterFileSystem and then initializing
 * the wrapper must set the Configuration on the embedded filesystem too.
 */
@Test public void testInitFilterFsSetsEmbedConf() throws Exception {
  final LocalFileSystem embedded = new LocalFileSystem();
  checkFsConf(embedded, null, 2);
  final FilterFileSystem wrapper = new FilterFileSystem(embedded);
  assertEquals(embedded, wrapper.getRawFileSystem());
  // Conf is still unset after wrapping...
  checkFsConf(wrapper, null, 3);
  wrapper.initialize(URI.create("filter:/"), conf);
  // ...and set everywhere after initialize().
  checkFsConf(wrapper, conf, 3);
}

Class: org.apache.hadoop.fs.TestFsShellCopy

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies -put target handling: a plain target becomes a file; a target with a trailing
// "/" or "/." must be treated as a directory (fails if absent, receives the file if
// present); a ".." component in the target resolves to its parent directory.
@Test public void testRepresentsDir() throws Exception { Path subdirDstPath=new Path(dstPath,srcPath.getName()); String argv[]=null; lfs.delete(dstPath,true); assertFalse(lfs.exists(dstPath)); argv=new String[]{"-put",srcPath.toString(),dstPath.toString()}; assertEquals(0,shell.run(argv)); assertTrue(lfs.exists(dstPath) && lfs.isFile(dstPath)); lfs.delete(dstPath,true); assertFalse(lfs.exists(dstPath)); lfs.delete(dstPath,true); for ( String suffix : new String[]{"/","/."}) { argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix}; assertEquals(1,shell.run(argv)); assertFalse(lfs.exists(dstPath)); assertFalse(lfs.exists(subdirDstPath)); } for ( String suffix : new String[]{"/","/."}) { lfs.delete(dstPath,true); lfs.mkdirs(dstPath); argv=new String[]{"-put",srcPath.toString(),dstPath.toString() + suffix}; assertEquals(0,shell.run(argv)); assertTrue(lfs.exists(subdirDstPath)); assertTrue(lfs.isFile(subdirDstPath)); } String dotdotDst=dstPath + "/foo/.."; lfs.delete(dstPath,true); lfs.mkdirs(new Path(dstPath,"foo")); argv=new String[]{"-put",srcPath.toString(),dotdotDst}; assertEquals(0,shell.run(argv)); assertTrue(lfs.exists(subdirDstPath)); assertTrue(lfs.isFile(subdirDstPath)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the -getmerge shell command: a single file, a missing source,
 * multiple files (concatenation follows argument order), the -nl flag
 * (newline appended after each file), a glob source, a directory source,
 * and a mixed file/directory argument list.
 */
@Test public void testCopyMerge() throws Exception {
  Path root = new Path(testRootDir, "TestMerge");
  Path f1 = new Path(root, "f1");
  Path f2 = new Path(root, "f2");
  Path f3 = new Path(root, "f3");
  Path fnf = new Path(root, "fnf");
  Path d = new Path(root, "dir");
  Path df1 = new Path(d, "df1");
  Path df2 = new Path(d, "df2");
  Path df3 = new Path(d, "df3");
  createFile(f1, f2, f3, df1, df2, df3);
  int exit;
  // Single file.
  exit = shell.run(new String[]{"-getmerge", f1.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1", readFile("out"));
  // Missing source: non-zero exit, no output file created.
  exit = shell.run(new String[]{"-getmerge", fnf.toString(), "out"});
  assertEquals(1, exit);
  assertFalse(lfs.exists(new Path("out")));
  // Two files, both argument orders.
  exit = shell.run(new String[]{"-getmerge", f1.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1f2", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", f2.toString(), f1.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f2f1", readFile("out"));
  // -nl appends a newline after each merged file.
  exit = shell.run(new String[]{"-getmerge", "-nl", f1.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\n", readFile("out"));
  // BUG FIX: the four shell.run() calls below previously did not capture the
  // exit code, so the following assertEquals(0, exit) re-checked a stale value.
  exit = shell.run(new String[]{"-getmerge", "-nl", new Path(root, "f*").toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\nf3\n", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", "-nl", root.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\nf2\nf3\n", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", "-nl", d.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("df1\ndf2\ndf3\n", readFile("out"));
  exit = shell.run(new String[]{"-getmerge", "-nl", f1.toString(), d.toString(), f2.toString(), "out"});
  assertEquals(0, exit);
  assertEquals("f1\ndf1\ndf2\ndf3\nf2\n", readFile("out"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// -moveFromLocal of a directory to a non-existent target: source dir is removed,
// target dir appears.
@Test public void testMoveDirFromLocal() throws Exception { Path testRoot=new Path(testRootDir,"testPutDir"); lfs.delete(testRoot,true); lfs.mkdirs(testRoot); Path srcDir=new Path(testRoot,"srcDir"); lfs.mkdirs(srcDir); Path targetDir=new Path(testRoot,"target"); int exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()}); assertEquals(0,exit); assertFalse(lfs.exists(srcDir)); assertTrue(lfs.exists(targetDir)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// -moveFromLocal of a single file: source is removed, target exists and is a file.
@Test public void testMoveFileFromLocal() throws Exception { Path testRoot=new Path(testRootDir,"testPutFile"); lfs.delete(testRoot,true); lfs.mkdirs(testRoot); Path target=new Path(testRoot,"target"); Path srcFile=new Path(testRoot,new Path("srcFile")); lfs.createNewFile(srcFile); int exit=shell.run(new String[]{"-moveFromLocal",srcFile.toString(),target.toString()}); assertEquals(0,exit); assertFalse(lfs.exists(srcFile)); assertTrue(lfs.exists(target)); assertTrue(lfs.isFile(target)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// -moveFromLocal of a dir into an existing target dir nests the source inside it;
// a second move of the same name must fail (exit 1) and leave the source in place.
@Test public void testMoveDirFromLocalDestExists() throws Exception { Path testRoot=new Path(testRootDir,"testPutDir"); lfs.delete(testRoot,true); lfs.mkdirs(testRoot); Path srcDir=new Path(testRoot,"srcDir"); lfs.mkdirs(srcDir); Path targetDir=new Path(testRoot,"target"); lfs.mkdirs(targetDir); int exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()}); assertEquals(0,exit); assertFalse(lfs.exists(srcDir)); assertTrue(lfs.exists(new Path(targetDir,srcDir.getName()))); lfs.mkdirs(srcDir); exit=shell.run(new String[]{"-moveFromLocal",srcDir.toString(),targetDir.toString()}); assertEquals(1,exit); assertTrue(lfs.exists(srcDir)); }

Class: org.apache.hadoop.fs.TestFsShellReturnCode

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// "rm -f" on a glob matching nothing must succeed (exit 0) and print nothing to stderr.
@Test(timeout=30000) public void testRmForceWithNonexistentGlob() throws Exception { Configuration conf=new Configuration(); FsShell shell=new FsShell(); shell.setConf(conf); final ByteArrayOutputStream bytes=new ByteArrayOutputStream(); final PrintStream err=new PrintStream(bytes); final PrintStream oldErr=System.err; System.setErr(err); try { int exit=shell.run(new String[]{"-rm","-f","nomatch*"}); assertEquals(0,exit); assertTrue(bytes.toString().isEmpty()); } finally { IOUtils.closeStream(err); System.setErr(oldErr); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// "-get" with a non-existent source must exit 1 and print a proper
// "No such file or directory" message — never the literal "get: null".
@Test(timeout=30000) public void testGetWithInvalidSourcePathShouldNotDisplayNullInConsole() throws Exception { Configuration conf=new Configuration(); FsShell shell=new FsShell(); shell.setConf(conf); final ByteArrayOutputStream bytes=new ByteArrayOutputStream(); final PrintStream out=new PrintStream(bytes); final PrintStream oldErr=System.err; System.setErr(out); final String results; try { Path tdir=new Path(TEST_ROOT_DIR,"notNullCopy"); fileSys.delete(tdir,true); fileSys.mkdirs(tdir); String[] args=new String[3]; args[0]="-get"; args[1]=new Path(tdir.toUri().getPath(),"/invalidSrc").toString(); args[2]=new Path(tdir.toUri().getPath(),"/invalidDst").toString(); assertTrue("file exists",!fileSys.exists(new Path(args[1]))); assertTrue("file exists",!fileSys.exists(new Path(args[2]))); int run=shell.run(args); results=bytes.toString(); assertEquals("Return code should be 1",1,run); assertTrue(" Null is coming when source path is invalid. ",!results.contains("get: null")); assertTrue(" Not displaying the intended message ",results.contains("get: `" + args[1] + "': No such file or directory")); } finally { IOUtils.closeStream(out); System.setErr(oldErr); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Plain "rm" (no -f) on a glob matching nothing must exit 1 and report
// "No such file or directory" on stderr.
@Test(timeout=30000) public void testRmWithNonexistentGlob() throws Exception { Configuration conf=new Configuration(); FsShell shell=new FsShell(); shell.setConf(conf); final ByteArrayOutputStream bytes=new ByteArrayOutputStream(); final PrintStream err=new PrintStream(bytes); final PrintStream oldErr=System.err; System.setErr(err); final String results; try { int exit=shell.run(new String[]{"-rm","nomatch*"}); assertEquals(1,exit); results=bytes.toString(); assertTrue(results.contains("rm: `nomatch*': No such file or directory")); } finally { IOUtils.closeStream(err); System.setErr(oldErr); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test Chmod 1. Create and write file on FS 2. Verify that exit code for * chmod on existing file is 0 3. Verify that exit code for chmod on * non-existing file is 1 4. Verify that exit code for chmod with glob input * on non-existing file is 1 5. Verify that exit code for chmod with glob * input on existing file in 0 * @throws Exception */ @Test(timeout=30000) public void testChmod() throws Exception { Path p1=new Path(TEST_ROOT_DIR,"testChmod/fileExists"); final String f1=p1.toUri().getPath(); final String f2=new Path(TEST_ROOT_DIR,"testChmod/fileDoesNotExist").toUri().getPath(); final String f3=new Path(TEST_ROOT_DIR,"testChmod/nonExistingfiles*").toUri().getPath(); final Path p4=new Path(TEST_ROOT_DIR,"testChmod/file1"); final Path p5=new Path(TEST_ROOT_DIR,"testChmod/file2"); final Path p6=new Path(TEST_ROOT_DIR,"testChmod/file3"); final String f7=new Path(TEST_ROOT_DIR,"testChmod/file*").toUri().getPath(); writeFile(fileSys,p1); assertTrue(fileSys.exists(p1)); String argv[]={"-chmod","777",f1}; assertEquals(0,fsShell.run(argv)); String argv2[]={"-chmod","777",f2}; assertEquals(1,fsShell.run(argv2)); String argv3[]={"-chmod","777",f3}; assertEquals(1,fsShell.run(argv3)); writeFile(fileSys,p4); assertTrue(fileSys.exists(p4)); writeFile(fileSys,p5); assertTrue(fileSys.exists(p5)); writeFile(fileSys,p6); assertTrue(fileSys.exists(p6)); String argv4[]={"-chmod","777",f7}; assertEquals(0,fsShell.run(argv4)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// An interrupted shell command must stop after the first processed path and exit with
// 130 (128 + SIGINT). InterruptCommand.processed counts paths handled before the stop;
// NOTE(review): the counter appears cumulative across the two runs (1 then 2) — confirm
// against InterruptCommand's reset semantics.
@Test(timeout=30000) public void testInterrupt() throws Exception { MyFsShell shell=new MyFsShell(); shell.setConf(new Configuration()); final Path d=new Path(TEST_ROOT_DIR,"testInterrupt"); final Path f1=new Path(d,"f1"); final Path f2=new Path(d,"f2"); assertTrue(fileSys.mkdirs(d)); writeFile(fileSys,f1); assertTrue(fileSys.isFile(f1)); writeFile(fileSys,f2); assertTrue(fileSys.isFile(f2)); int exitCode=shell.run(new String[]{"-testInterrupt",f1.toString(),f2.toString()}); assertEquals(1,InterruptCommand.processed); assertEquals(130,exitCode); exitCode=shell.run(new String[]{"-testInterrupt",d.toString()}); assertEquals(2,InterruptCommand.processed); assertEquals(130,exitCode); }

Class: org.apache.hadoop.fs.TestGlobPaths

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises curly-brace alternation in glob patterns: simple {a,b} sets, nested sets,
// alternatives spanning path separators, absolute-path alternatives, literal "}" handling,
// empty alternatives ({}, {,}, {b,}), and finally an unbalanced "{" which must raise
// IOException("Illegal file pattern: ...").
@Test public void pTestCurlyBracket() throws IOException { Path[] matchedPath; String[] files; try { files=new String[]{USER_DIR + "/a.abcxx",USER_DIR + "/a.abxy",USER_DIR + "/a.hlp",USER_DIR + "/a.jhyy"}; matchedPath=prepareTesting(USER_DIR + "/a.{abc,jh}??",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[3]); } finally { cleanupDFS(); } try { files=new String[]{USER_DIR + "/a.abcxx",USER_DIR + "/a.abdxy",USER_DIR + "/a.hlp",USER_DIR + "/a.jhyy"}; matchedPath=prepareTesting(USER_DIR + "/a.{ab{c,d},jh}??",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); assertEquals(matchedPath[2],path[3]); } finally { cleanupDFS(); } try { files=new String[]{USER_DIR + "/a/b",USER_DIR + "/a/d",USER_DIR + "/c/b",USER_DIR + "/c/d"}; matchedPath=prepareTesting(USER_DIR + "/{a/b,c/d}",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[3]); } finally { cleanupDFS(); } try { files=new String[]{"/a/b","/a/d","/c/b","/c/d"}; matchedPath=prepareTesting("{/a/b,/c/d}",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[3]); } finally { cleanupDFS(); } try { files=new String[]{USER_DIR + "/}bc",USER_DIR + "/}c"}; matchedPath=prepareTesting(USER_DIR + "/}{a,b}c",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{b}c",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{}bc",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{,}bc",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); matchedPath=prepareTesting(USER_DIR + "/}{b,}c",files); assertEquals(matchedPath.length,2); 
assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); matchedPath=prepareTesting(USER_DIR + "/}{,b}c",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); matchedPath=prepareTesting(USER_DIR + "/}{ac,?}",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[1]); boolean hasException=false; try { prepareTesting(USER_DIR + "}{bc",files); } catch ( IOException e) { assertTrue(e.getMessage().startsWith("Illegal file pattern:")); hasException=true; } assertTrue(hasException); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
/**
 * Glob "a.*" must match "a.", "a.old.java" and "a.txt" (in sorted order)
 * but not the hidden-prefix ".java".
 */
@Test public void pTestClosure2() throws IOException {
  try {
    String[] files = new String[]{USER_DIR + "/a.", USER_DIR + "/a.txt",
        USER_DIR + "/a.old.java", USER_DIR + "/.java"};
    Path[] matchedPath = prepareTesting(USER_DIR + "/a.*", files);
    // Expected-first argument order (was reversed, which garbles failure messages).
    assertEquals(3, matchedPath.length);
    assertEquals(path[0], matchedPath[0]);
    assertEquals(path[2], matchedPath[1]);
    assertEquals(path[1], matchedPath[2]);
  } finally {
    cleanupDFS();
  }
}

APIUtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
// A backslash-escaped "[" is a literal, not a character-class opener; skipped on
// Windows where "\" is the path separator.
@Test public void pTestEscape() throws IOException { org.junit.Assume.assumeTrue(!Path.WINDOWS); try { String[] files=new String[]{USER_DIR + "/ab\\[c.d"}; Path[] matchedPath=prepareTesting(USER_DIR + "/ab\\[c.d",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// Glob "a*" matches every name starting with "a" ("a", "abc", "abc.p") but not "bacd".
@Test public void pTestClosure1() throws IOException { try { String[] files=new String[]{USER_DIR + "/a",USER_DIR + "/abc",USER_DIR + "/abc.p",USER_DIR + "/bacd"}; Path[] matchedPath=prepareTesting(USER_DIR + "/a*",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); assertEquals(matchedPath[2],path[2]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// "*" in an intermediate path component: "*/file1" matches file1 under dir1 and dir3.
@Test public void pTestClosure4() throws IOException { try { String[] files=new String[]{USER_DIR + "/dir1/file1",USER_DIR + "/dir2/file2",USER_DIR + "/dir3/file1"}; Path[] matchedPath=prepareTesting(USER_DIR + "/*/file1",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[2]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// Java-regex metacharacters ($ . | + parentheses) in a glob are literals, so
// "($.|+)*" matches only the literally-named "($.|+)bc" file.
@Test public void pTestJavaRegexSpecialChars() throws IOException { try { String[] files=new String[]{USER_DIR + "/($.|+)bc",USER_DIR + "/abc"}; Path[] matchedPath=prepareTesting(USER_DIR + "/($.|+)*",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// "a*x" requires both the "a" prefix and "x" suffix; results come back sorted.
@Test public void pTestClosure3() throws IOException { try { String[] files=new String[]{USER_DIR + "/a.txt.x",USER_DIR + "/ax",USER_DIR + "/ab37x",USER_DIR + "/bacd"}; Path[] matchedPath=prepareTesting(USER_DIR + "/a*x",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[2]); assertEquals(matchedPath[2],path[1]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
/** A glob-free pattern must match only the literally-named path itself. */
@Test public void pTestLiteral() throws IOException {
  try {
    String[] files = new String[]{USER_DIR + "/a2c", USER_DIR + "/abc.d"};
    Path[] matchedPath = prepareTesting(USER_DIR + "/abc.d", files);
    // Expected-first argument order (was reversed, which garbles failure messages).
    assertEquals(1, matchedPath.length);
    assertEquals(path[1], matchedPath[0]);
  } finally {
    cleanupDFS();
  }
}

APIUtilityVerifier EqualityVerifier 
// Negated character class: "[^a-cg-z0-9]" admits only "d"/"e"/"f"-range suffixes.
@Test public void pTestSetExcl() throws IOException { try { String[] files=new String[]{USER_DIR + "/a.d",USER_DIR + "/a.e",USER_DIR + "/a.0",USER_DIR + "/a.h"}; Path[] matchedPath=prepareTesting(USER_DIR + "/a.[^a-cg-z0-9]",files); assertEquals(matchedPath.length,2); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// Combines "?", "*", a character class, and brace alternation in one pattern;
// only /user/dd/a.hxy satisfies all pieces.
@Test public void pTestCombination() throws IOException { try { String[] files=new String[]{"/user/aa/a.c","/user/bb/a.cpp","/user1/cc/b.hlp","/user/dd/a.hxy"}; Path[] matchedPath=prepareTesting("/use?/*/a.[ch]{lp,xy}",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[3]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// A PathFilter passed alongside the glob further restricts matches: only paths whose
// string matches the regex survive.
@Test public void testPathFilter() throws IOException { try { String[] files=new String[]{USER_DIR + "/a",USER_DIR + "/a/b"}; Path[] matchedPath=prepareTesting(USER_DIR + "/*/*",files,new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b")); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[1]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// Character range "[d-fm]" matches suffixes d, e, f (h is outside the set).
@Test public void pTestRange() throws IOException { try { String[] files=new String[]{USER_DIR + "/a.d",USER_DIR + "/a.e",USER_DIR + "/a.f",USER_DIR + "/a.h"}; Path[] matchedPath=prepareTesting(USER_DIR + "/a.[d-fm]",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[0]); assertEquals(matchedPath[1],path[1]); assertEquals(matchedPath[2],path[2]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// "?" matches exactly one character: a.c, a2c, abc match; abcd does not.
// Results come back in sorted order.
@Test public void pTestAny() throws IOException { try { String[] files=new String[]{USER_DIR + "/abc",USER_DIR + "/a2c",USER_DIR + "/a.c",USER_DIR + "/abcd"}; Path[] matchedPath=prepareTesting(USER_DIR + "/a?c",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[2]); assertEquals(matchedPath[1],path[1]); assertEquals(matchedPath[2],path[0]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// Same as testPathFilter but the glob's last component is fixed ("b"); the filter
// still restricts the wildcard middle component to "a".
@Test public void testPathFilterWithFixedLastComponent() throws IOException { try { String[] files=new String[]{USER_DIR + "/a",USER_DIR + "/a/b",USER_DIR + "/c",USER_DIR + "/c/b"}; Path[] matchedPath=prepareTesting(USER_DIR + "/*/b",files,new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b")); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[1]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// "*/file1" requires a directory component, so the top-level "file1" is not matched.
@Test public void pTestClosure5() throws IOException { try { String[] files=new String[]{USER_DIR + "/dir1/file1",USER_DIR + "/file1"}; Path[] matchedPath=prepareTesting(USER_DIR + "/*/file1",files); assertEquals(matchedPath.length,1); assertEquals(matchedPath[0],path[0]); } finally { cleanupDFS(); } }

APIUtilityVerifier EqualityVerifier 
// "[ch]??" matches c/h followed by exactly two chars: a.cpp, a.hlp, a.hxy (not "a.c").
@Test public void pTestSet() throws IOException { try { String[] files=new String[]{USER_DIR + "/a.c",USER_DIR + "/a.cpp",USER_DIR + "/a.hlp",USER_DIR + "/a.hxy"}; Path[] matchedPath=prepareTesting(USER_DIR + "/a.[ch]??",files); assertEquals(matchedPath.length,3); assertEquals(matchedPath[0],path[1]); assertEquals(matchedPath[1],path[2]); assertEquals(matchedPath[2],path[3]); } finally { cleanupDFS(); } }

Class: org.apache.hadoop.fs.TestHarFileSystem

EqualityVerifier 
// Each sub-case below builds BlockLocation(s) and calls
// HarFileSystem.fixBlockLocations(blocks, start, len, fileOffsetInHar), then checks the
// clipped/shifted offset and length; the final case checks a location split across two
// 512-byte blocks.
/** * Test how block location offsets and lengths are fixed. */ @Test public void testFixBlockLocations(){ { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,0,20,5); assertEquals(b[0].getOffset(),5); assertEquals(b[0].getLength(),10); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,0,20,15); assertEquals(b[0].getOffset(),0); assertEquals(b[0].getLength(),5); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,0,10,5); assertEquals(b[0].getOffset(),5); assertEquals(b[0].getLength(),5); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,0,6,12); assertEquals(b[0].getOffset(),0); assertEquals(b[0].getLength(),6); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,3,20,5); assertEquals(b[0].getOffset(),5); assertEquals(b[0].getLength(),10); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,3,20,15); assertEquals(b[0].getOffset(),3); assertEquals(b[0].getLength(),2); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,3,7,5); assertEquals(b[0].getOffset(),5); assertEquals(b[0].getLength(),5); } { BlockLocation[] b={new BlockLocation(null,null,10,10)}; HarFileSystem.fixBlockLocations(b,3,3,12); assertEquals(b[0].getOffset(),3); assertEquals(b[0].getLength(),3); } { BlockLocation[] b={new BlockLocation(null,null,512,512),new BlockLocation(null,null,1024,512)}; HarFileSystem.fixBlockLocations(b,0,512,896); assertEquals(b[0].getOffset(),0); assertEquals(b[0].getLength(),128); assertEquals(b[1].getOffset(),128); assertEquals(b[1].getLength(),384); } }

EqualityVerifier 
/** HAR filesystems expose no checksum data: getFileChecksum must return null. */
@Test public void testFileChecksum() throws Exception {
  final Path fileInHar = new Path("har://file-localhost/foo.har/file1");
  final HarFileSystem fs = new HarFileSystem();
  try {
    Assert.assertEquals(null, fs.getFileChecksum(fileInHar));
  } finally {
    fs.close();
  }
}

Class: org.apache.hadoop.fs.TestHarFileSystemBasics

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listLocatedStatus on dir1 of the bundled /test.har fixture must yield exactly
// 1.txt and 2.txt, each exactly once (names are removed from the set as seen).
@Test public void testListLocatedStatus() throws Exception { String testHarPath=this.getClass().getResource("/test.har").getPath(); URI uri=new URI("har://" + testHarPath); HarFileSystem hfs=new HarFileSystem(localFileSystem); hfs.initialize(uri,new Configuration()); Set expectedFileNames=new HashSet(); expectedFileNames.add("1.txt"); expectedFileNames.add("2.txt"); Path path=new Path("dir1"); RemoteIterator fileList=hfs.listLocatedStatus(path); while (fileList.hasNext()) { String fileName=fileList.next().getPath().getName(); assertTrue(fileName + " not in expected files list",expectedFileNames.contains(fileName)); expectedFileNames.remove(fileName); } assertEquals("Didn't find all of the expected file names: " + expectedFileNames,0,expectedFileNames.size()); }

InternalCallVerifier EqualityVerifier 
// Basic HarFileSystem invariants: version, "har" scheme, home == archive path, and
// the working directory is immutable (setWorkingDirectory is a no-op).
@Test public void testPositiveHarFileSystemBasics() throws Exception { assertEquals(HarFileSystem.VERSION,harFileSystem.getHarVersion()); final URI harUri=harFileSystem.getUri(); assertEquals(harPath.toUri().getPath(),harUri.getPath()); assertEquals("har",harUri.getScheme()); final Path homePath=harFileSystem.getHomeDirectory(); assertEquals(harPath.toUri().getPath(),homePath.toUri().getPath()); final Path workDirPath0=harFileSystem.getWorkingDirectory(); assertEquals(homePath,workDirPath0); harFileSystem.setWorkingDirectory(new Path("/foo/bar")); assertEquals(workDirPath0,harFileSystem.getWorkingDirectory()); }

Class: org.apache.hadoop.fs.TestHardLink

BooleanVerifier EqualityVerifier HybridVerifier 
// Sanity-checks the shape of the Windows hardlink command templates (array lengths and
// key tokens). NOTE(review): the assertEquals calls on string literals like
// ("%f").length() assert properties of constants and can never fail.
@Test public void testWindowsSyntax(){ class win extends HardLinkCGWin { } ; assertEquals(5,win.hardLinkCommand.length); assertEquals(7,win.hardLinkMultPrefix.length); assertEquals(7,win.hardLinkMultSuffix.length); assertEquals(4,win.getLinkCountCommand.length); assertTrue(win.hardLinkMultPrefix[4].equals("%f")); assertEquals(2,("%f").length()); assertTrue(win.hardLinkMultDir.equals("\\%f")); assertEquals(3,("\\%f").length()); assertTrue(win.getLinkCountCommand[1].equals("hardlink")); assertEquals(4,("-c%h").length()); }

APIUtilityVerifier EqualityVerifier 
/**
 * createHardLinkMult() invoked with an empty file list must perform zero
 * System exec calls (checked via the extended overload's return value) and
 * leave the fixture untouched.
 */
@Test public void testCreateHardLinkMultEmptyList() throws IOException {
  final String[] noFiles = {};
  final int execCallCount =
      createHardLinkMult(src, noFiles, tgt_mult, getMaxAllowedCmdArgLength());
  assertEquals(0, execCallCount);
  // Nothing should have changed on disk.
  validateSetup();
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test the single-file method HardLink.createHardLink(). * Also tests getLinkCount() with values greater than one. */ @Test public void testCreateHardLink() throws IOException { createHardLink(x1,x1_one); assertTrue(x1_one.exists()); assertEquals(2,getLinkCount(x1)); assertEquals(2,getLinkCount(x1_one)); assertEquals(1,getLinkCount(x2)); createHardLink(x2,y_one); createHardLink(x3,x3_one); assertEquals(2,getLinkCount(x2)); assertEquals(2,getLinkCount(x3)); createHardLink(x1,x11_one); assertEquals(3,getLinkCount(x1)); assertEquals(3,getLinkCount(x1_one)); assertEquals(3,getLinkCount(x11_one)); validateTgtOne(); appendToFile(x1_one,str3); assertTrue(fetchFileContents(x1_one).equals(str1 + str3)); assertTrue(fetchFileContents(x11_one).equals(str1 + str3)); assertTrue(fetchFileContents(x1).equals(str1 + str3)); }

EqualityVerifier 
/**
 * Sanity check of HardLink.getLinkCount(): every ordinary, singly-linked
 * fixture file must report a link count of 1. Multiply-linked cases are
 * covered by later tests.
 */
@Test public void testGetLinkCount() throws IOException {
  for (File singlyLinked : new File[] {x1, x2, x3}) {
    assertEquals(1, getLinkCount(singlyLinked));
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
// createHardLinkMult over all files in src: every source and its link must report a
// link count of 2; appending through one link must be visible through the other.
@Test public void testCreateHardLinkMult() throws IOException { String[] fileNames=src.list(); createHardLinkMult(src,fileNames,tgt_mult); assertEquals(2,getLinkCount(x1)); assertEquals(2,getLinkCount(x2)); assertEquals(2,getLinkCount(x3)); assertEquals(2,getLinkCount(x1_mult)); assertEquals(2,getLinkCount(x2_mult)); assertEquals(2,getLinkCount(x3_mult)); validateTgtMult(); appendToFile(x1_mult,str3); assertTrue(fetchFileContents(x1_mult).equals(str1 + str3)); assertTrue(fetchFileContents(x1).equals(str1 + str3)); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test createHardLinkMult(), again, this time with the "too long list" * case where the total size of the command line arguments exceed the * allowed maximum. In this case, the list should be automatically * broken up into chunks, each chunk no larger than the max allowed. * We use an extended version of the method call, specifying the * size limit explicitly, to simulate the "too long" list with a * relatively short list. */ @Test public void testCreateHardLinkMultOversizeAndEmpty() throws IOException { String name1="x11111111"; String name2="x22222222"; String name3="x33333333"; File x1_long=new File(src,name1); File x2_long=new File(src,name2); File x3_long=new File(src,name3); x1.renameTo(x1_long); x2.renameTo(x2_long); x3.renameTo(x3_long); assertTrue(x1_long.exists()); assertTrue(x2_long.exists()); assertTrue(x3_long.exists()); assertFalse(x1.exists()); assertFalse(x2.exists()); assertFalse(x3.exists()); int callCount; String[] emptyList={}; String[] fileNames=src.list(); int overhead=getLinkMultArgLength(src,emptyList,tgt_mult); int maxLength=overhead + (int)(2.5 * (float)(1 + name1.length())); callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength); assertEquals(2,callCount); String[] tgt_multNames=tgt_mult.list(); Arrays.sort(fileNames); Arrays.sort(tgt_multNames); assertArrayEquals(fileNames,tgt_multNames); FileUtil.fullyDelete(tgt_mult); assertFalse(tgt_mult.exists()); tgt_mult.mkdirs(); assertTrue(tgt_mult.exists() && tgt_mult.list().length == 0); maxLength=overhead + (int)(0.5 * (float)(1 + name1.length())); callCount=createHardLinkMult(src,fileNames,tgt_mult,maxLength); assertEquals(3,callCount); tgt_multNames=tgt_mult.list(); Arrays.sort(fileNames); Arrays.sort(tgt_multNames); assertArrayEquals(fileNames,tgt_multNames); }

Class: org.apache.hadoop.fs.TestListFiles

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test when input path is a file */ @Test public void testFile() throws IOException { fs.mkdirs(TEST_DIR); writeFile(fs,FILE1,FILE_LEN); RemoteIterator itor=fs.listFiles(FILE1,true); LocatedFileStatus stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE1),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); itor=fs.listFiles(FILE1,false); stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE1),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); fs.delete(FILE1,true); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listFiles on a directory: empty dir yields an empty iterator (both recursive and not);
// recursive listing over TEST_DIR must visit FILE1/FILE2/FILE3 exactly once each
// (order-insensitive, tracked via the filesToFind set); non-recursive listing of
// TEST_DIR returns only the direct child FILE1.
/** * Test when input path is a directory */ @Test public void testDirectory() throws IOException { fs.mkdirs(DIR1); RemoteIterator itor=fs.listFiles(DIR1,true); assertFalse(itor.hasNext()); itor=fs.listFiles(DIR1,false); assertFalse(itor.hasNext()); writeFile(fs,FILE2,FILE_LEN); itor=fs.listFiles(DIR1,true); LocatedFileStatus stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE2),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); itor=fs.listFiles(DIR1,false); stat=itor.next(); assertFalse(itor.hasNext()); assertTrue(stat.isFile()); assertEquals(FILE_LEN,stat.getLen()); assertEquals(fs.makeQualified(FILE2),stat.getPath()); assertEquals(1,stat.getBlockLocations().length); writeFile(fs,FILE1,FILE_LEN); writeFile(fs,FILE3,FILE_LEN); Set filesToFind=new HashSet(); filesToFind.add(fs.makeQualified(FILE1)); filesToFind.add(fs.makeQualified(FILE2)); filesToFind.add(fs.makeQualified(FILE3)); itor=fs.listFiles(TEST_DIR,true); stat=itor.next(); assertTrue(stat.isFile()); assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath())); stat=itor.next(); assertTrue(stat.isFile()); assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath())); stat=itor.next(); assertTrue(stat.isFile()); assertTrue("Path " + stat.getPath() + " unexpected",filesToFind.remove(stat.getPath())); assertFalse(itor.hasNext()); assertTrue(filesToFind.isEmpty()); itor=fs.listFiles(TEST_DIR,false); stat=itor.next(); assertTrue(stat.isFile()); assertEquals(fs.makeQualified(FILE1),stat.getPath()); assertFalse(itor.hasNext()); fs.delete(TEST_DIR,true); }

Class: org.apache.hadoop.fs.TestLocalDirAllocator

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that {@link LocalDirAllocator#getAllLocalPathsToRead(String,Configuration)}
 * returns correct filenames and "file" scheme, that its iterator throws
 * NoSuchElementException when exhausted, and that remove() is unsupported.
 * @throws IOException
 */
@Test(timeout=30000) public void testGetAllLocalPathsToRead() throws IOException {
  assumeTrue(!isWindows);
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir0)));
    assertTrue(localFs.mkdirs(new Path(dir1)));
    localFs.create(new Path(dir0 + Path.SEPARATOR + FILENAME));
    localFs.create(new Path(dir1 + Path.SEPARATOR + FILENAME));
    // Both copies of FILENAME must be returned, each with the "file" scheme.
    final Iterable pathIterable = dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
    int count = 0;
    for (final Path p : pathIterable) {
      count++;
      assertEquals(FILENAME, p.getName());
      assertEquals("file", p.getFileSystem(conf).getUri().getScheme());
    }
    assertEquals(2, count);
    // The iterable is single-use: a fresh iterator() is already exhausted here.
    try {
      Path p = pathIterable.iterator().next();
      // BUG FIX: was assertFalse(msg, true) — use fail() to express intent.
      fail("NoSuchElementException must be thrown, but returned [" + p + "] instead.");
    } catch (NoSuchElementException nsee) {
      // expected
    }
    // remove() is not supported by the returned iterator.
    final Iterable pathIterable2 = dirAllocator.getAllLocalPathsToRead(FILENAME, conf);
    final Iterator it = pathIterable2.iterator();
    try {
      it.remove();
      // BUG FIX: was assertFalse(true).
      fail("UnsupportedOperationException expected from Iterator.remove()");
    } catch (UnsupportedOperationException uoe) {
      // expected
    }
  } finally {
    Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
    rmBufferDirs();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/** * Test getLocalPathToRead() returns correct filename and "file" schema. * @throws IOException */ @Test(timeout=30000) public void testGetLocalPathToRead() throws IOException { assumeTrue(!isWindows); String dir=buildBufferDir(ROOT,0); try { conf.set(CONTEXT,dir); assertTrue(localFs.mkdirs(new Path(dir))); File f1=dirAllocator.createTmpFileForWrite(FILENAME,SMALL_FILE_SIZE,conf); Path p1=dirAllocator.getLocalPathToRead(f1.getName(),conf); assertEquals(f1.getName(),p1.getName()); assertEquals("file",p1.getFileSystem(conf).getUri().getScheme()); } finally { Shell.execCommand(Shell.getSetPermissionCommand("u+w",false,BUFFER_DIR_ROOT)); rmBufferDirs(); } }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Requesting a write path without configuring the buffer dirs must surface a
 * descriptive IOException — never a NullPointerException.
 */
@Test(timeout=30000)
public void testShouldNotthrowNPE() throws Exception {
  final Configuration emptyConf = new Configuration();
  try {
    dirAllocator.getLocalPathForWrite("/test", emptyConf);
    fail("Exception not thrown when " + CONTEXT + " is not set");
  } catch (IOException e) {
    // The documented failure mode: a clear "not configured" message.
    assertEquals(CONTEXT + " not configured", e.getMessage());
  } catch (NullPointerException e) {
    fail("Lack of configuration should not have thrown an NPE.");
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Two buffer dirs. The first dir does not exist &amp; is on a read-only disk;
 * the second dir exists &amp; is RW. getLocalPathForWrite with checkAccess set
 * to false should create a parent directory; with checkAccess true, the
 * directory should not be created.
 * (Fixed: assertEquals arguments were reversed; the expected-exception check
 * silently passed when no exception was thrown — now fails explicitly; the
 * catch is narrowed to FileNotFoundException.)
 * @throws IOException
 */
@Test(timeout=30000)
public void testLocalPathForWriteDirCreation() throws IOException {
  String dir0 = buildBufferDir(ROOT, 0);
  String dir1 = buildBufferDir(ROOT, 1);
  try {
    conf.set(CONTEXT, dir0 + "," + dir1);
    assertTrue(localFs.mkdirs(new Path(dir1)));
    BUFFER_ROOT.setReadOnly();
    // Default checkAccess: the parent dir must be created.
    Path p1 = dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
    assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
    // checkAccess == false: the parent dir must NOT be created.
    Path p2 = dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
        false);
    try {
      localFs.getFileStatus(p2.getParent());
      fail("Expected FileNotFoundException: parent of " + p2
          + " should not have been created");
    } catch (FileNotFoundException e) {
      // expected
    }
  } finally {
    // Restore write permission so rmBufferDirs() can clean up.
    Shell.execCommand(Shell.getSetPermissionCommand("u+w", false, BUFFER_DIR_ROOT));
    rmBufferDirs();
  }
}

Class: org.apache.hadoop.fs.TestLocalFSFileContextMainOperations

EqualityVerifier 
/**
 * A newly created file must carry FILE_DEFAULT_PERM with the context umask
 * applied.
 */
@Test
public void testDefaultFilePermission() throws IOException {
  final Path newFile =
      fileContextTestHelper.getTestRootPath(fc, "testDefaultFilePermission");
  FileContextTestHelper.createFile(fc, newFile);
  final FsPermission expected =
      FileContext.FILE_DEFAULT_PERM.applyUMask(fc.getUMask());
  final FsPermission actual = fc.getFileStatus(newFile).getPermission();
  Assert.assertEquals(expected, actual);
}

Class: org.apache.hadoop.fs.TestLocalFileSystem

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * listStatus() must cope with a file name containing colons.
 * NOTE(review): asserts exactly one entry, so this assumes TEST_ROOT_DIR is
 * otherwise empty when the test runs — verify against the fixture setup.
 */
@Test(timeout=1000)
public void testListStatusWithColons() throws IOException {
  assumeTrue(!Shell.WINDOWS);  // ':' is the drive separator on Windows
  File colonFile = new File(TEST_ROOT_DIR, "foo:bar");
  colonFile.mkdirs();
  FileStatus[] stats = fileSys.listStatus(new Path(TEST_ROOT_DIR));
  assertEquals("Unexpected number of stats", 1, stats.length);
  assertEquals("Bad path from stat", colonFile.getAbsolutePath(),
      stats[0].getPath().toUri().getPath());
}

InternalCallVerifier EqualityVerifier 
/** The qualified user.home path must equal what getHomeDirectory() reports. */
@Test(timeout=1000)
public void testHomeDirectory() throws IOException {
  final Path expectedHome =
      new Path(System.getProperty("user.home")).makeQualified(fileSys);
  assertEquals(expectedHome, fileSys.getHomeDirectory());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates a directory and a random-size file inside it, checks that
 * getFileStatus()/getContentSummary() agree on the length, then verifies
 * mkdirs() rejects a path through an existing file and a null argument.
 */
@Test(timeout=1000)
public void testCreateFileAndMkdirs() throws IOException {
  Path test_dir = new Path(TEST_ROOT_DIR, "test_dir");
  Path test_file = new Path(test_dir, "file1");
  assertTrue(fileSys.mkdirs(test_dir));
  // Random size in [1, 2^20] keeps the length assertions non-trivial.
  final int fileSize = new Random().nextInt(1 << 20) + 1;
  writeFile(fileSys, test_file, fileSize);
  {
    final FileStatus status = fileSys.getFileStatus(test_file);
    Assert.assertEquals(fileSize, status.getLen());
    final ContentSummary summary = fileSys.getContentSummary(test_dir);
    Assert.assertEquals(fileSize, summary.getLength());
  }
  // A path whose parent component is a regular file must be rejected.
  Path bad_dir = new Path(test_file, "another_dir");
  try {
    fileSys.mkdirs(bad_dir);
    fail("Failed to detect existing file in path");
  } catch (ParentNotDirectoryException e) {
    // expected
  }
  try {
    fileSys.mkdirs(null);
    fail("Failed to detect null in mkdir arg");
  } catch (IllegalArgumentException e) {
    // expected
  }
}

APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * On Windows, listStatus() on a drive-relative path (drive specifier
 * stripped) must return paths consistent with the queried form.
 * NOTE(review): asserts exactly one entry — assumes the directory holds only
 * the "foo" subdir created here; verify against fixture setup.
 */
@Test
public void testListStatusReturnConsistentPathOnWindows() throws IOException {
  assumeTrue(Shell.WINDOWS);
  String dirNoDriveSpec = TEST_ROOT_DIR;
  // Strip a leading drive specifier such as "c:".
  if (dirNoDriveSpec.charAt(1) == ':')
    dirNoDriveSpec = dirNoDriveSpec.substring(2);
  File file = new File(dirNoDriveSpec, "foo");
  file.mkdirs();
  FileStatus[] stats = fileSys.listStatus(new Path(dirNoDriveSpec));
  assertEquals("Unexpected number of stats", 1, stats.length);
  assertEquals("Bad path from stat", new Path(file.getPath()).toUri().getPath(),
      stats[0].getPath().toUri().getPath());
}

InternalCallVerifier EqualityVerifier 
/** A '%' in a file name must survive the round trip through getFileStatus(). */
@Test(timeout=1000)
public void testPathEscapes() throws IOException {
  Path path = new Path(TEST_ROOT_DIR, "foo%bar");
  writeFile(fileSys, path, 1);
  FileStatus status = fileSys.getFileStatus(path);
  assertEquals(path.makeQualified(fileSys), status.getPath());
  cleanupFile(fileSys, path);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the capability of setting the working directory: relative paths for
 * mkdirs/copy/rename/open must resolve against the working directory set via
 * setWorkingDirectory().
 */
@Test(timeout=10000)
public void testWorkingDirectory() throws IOException {
  Path origDir = fileSys.getWorkingDirectory();
  Path subdir = new Path(TEST_ROOT_DIR, "new");
  try {
    assertTrue(!fileSys.exists(subdir));
    assertTrue(fileSys.mkdirs(subdir));
    assertTrue(fileSys.isDirectory(subdir));
    fileSys.setWorkingDirectory(subdir);
    // Relative mkdirs/delete must resolve under the new working dir.
    Path dir1 = new Path("dir1");
    assertTrue(fileSys.mkdirs(dir1));
    assertTrue(fileSys.isDirectory(dir1));
    fileSys.delete(dir1, true);
    assertTrue(!fileSys.exists(dir1));
    // Relative copy/rename round trips.
    Path file1 = new Path("file1");
    Path file2 = new Path("sub/file2");
    String contents = writeFile(fileSys, file1, 1);
    fileSys.copyFromLocalFile(file1, file2);
    assertTrue(fileSys.exists(file1));
    assertTrue(fileSys.isFile(file1));
    cleanupFile(fileSys, file2);
    fileSys.copyToLocalFile(file1, file2);
    cleanupFile(fileSys, file2);
    fileSys.rename(file1, file2);
    assertTrue(!fileSys.exists(file1));
    assertTrue(fileSys.exists(file2));
    fileSys.rename(file2, file1);
    // Reading back through a relative path must yield the written contents.
    InputStream stm = fileSys.open(file1);
    byte[] buffer = new byte[3];
    int bytesRead = stm.read(buffer, 0, 3);
    assertEquals(contents, new String(buffer, 0, bytesRead));
    stm.close();
  } finally {
    // Always restore the original working directory for later tests.
    fileSys.setWorkingDirectory(origDir);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** resolvePath() must drop a URI fragment ("#glacier") from the result. */
@Test
public void testStripFragmentFromPath() throws Exception {
  FileSystem fs = FileSystem.getLocal(new Configuration());
  Path pathQualified = TEST_PATH.makeQualified(fs.getUri(),
      fs.getWorkingDirectory());
  Path pathWithFragment = new Path(new URI(pathQualified.toString() + "#glacier"));
  FileSystemTestHelper.createFile(fs, pathWithFragment);
  Path resolved = fs.resolvePath(pathWithFragment);
  assertEquals("resolvePath did not strip fragment from Path", pathQualified,
      resolved);
}

EqualityVerifier 
/** Exactly one statistics object must be registered for the "file" scheme. */
@Test(timeout=1000)
public void testStatistics() throws Exception {
  int matches = 0;
  for (Statistics perScheme : FileSystem.getAllStatistics()) {
    if (perScheme.getScheme().equals("file")) {
      matches++;
    }
  }
  assertEquals(1, matches);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * setTimes() with a new mtime and -1 for atime must update the modification
 * time and leave the access time untouched.
 */
@Test(timeout=1000)
public void testSetTimes() throws Exception {
  Path path = new Path(TEST_ROOT_DIR, "set-times");
  writeFile(fileSys, path, 1);
  long newModTime = 12345000;  // arbitrary mtime, guaranteed != "now" below
  FileStatus status = fileSys.getFileStatus(path);
  assertTrue("check we're actually changing something",
      newModTime != status.getModificationTime());
  long accessTime = status.getAccessTime();
  // -1 means "leave the access time unchanged".
  fileSys.setTimes(path, newModTime, -1);
  status = fileSys.getFileStatus(path);
  assertEquals(newModTime, status.getModificationTime());
  assertEquals(accessTime, status.getAccessTime());
}

Class: org.apache.hadoop.fs.TestPath

EqualityVerifier 
/**
 * Tests URIs created from Path objects: special characters ('?', '"', space,
 * ';', '+', '-', '=', ',') must survive the Path -&gt; URI conversion as part
 * of the path component.
 */
@Test(timeout=30000)
public void testPathToUriConversion() throws URISyntaxException, IOException {
  assertEquals("? mark char in to URI",
      new URI(null, null, "/foo?bar", null, null), new Path("/foo?bar").toUri());
  assertEquals("escape slashes chars in to URI",
      new URI(null, null, "/foo\"bar", null, null), new Path("/foo\"bar").toUri());
  assertEquals("spaces in chars to URI",
      new URI(null, null, "/foo bar", null, null), new Path("/foo bar").toUri());
  // Path keeps '?' as part of the path, whereas URI parsing treats it as the
  // start of the query component.
  assertEquals("/foo?bar", new Path("http://localhost/foo?bar").toUri().getPath());
  assertEquals("/foo", new URI("http://localhost/foo?bar").getPath());
  assertEquals(new URI("/foo;bar").getPath(), new Path("/foo;bar").toUri().getPath());
  assertEquals(new URI("/foo;bar"), new Path("/foo;bar").toUri());
  assertEquals(new URI("/foo+bar"), new Path("/foo+bar").toUri());
  assertEquals(new URI("/foo-bar"), new Path("/foo-bar").toUri());
  assertEquals(new URI("/foo=bar"), new Path("/foo=bar").toUri());
  assertEquals(new URI("/foo,bar"), new Path("/foo,bar").toUri());
}

EqualityVerifier 
/** getName() must return the final path component ("" for the root). */
@Test(timeout=30000)
public void testGetName() {
  // {expected name, input path} pairs.
  final String[][] cases = {
      {"", "/"},
      {"foo", "foo"},
      {"foo", "/foo"},
      {"foo", "/foo/"},
      {"bar", "/foo/bar"},
      {"bar", "hdfs://host/foo/bar"},
  };
  for (String[] c : cases) {
    assertEquals(c[0], new Path(c[1]).getName());
  }
}

BranchVerifier EqualityVerifier 
/** getParent() walks up one component; the root's parent is null. */
@Test(timeout=30000)
public void testParent() {
  assertEquals(new Path("/foo"), new Path("/foo/bar").getParent());
  assertEquals(new Path("foo"), new Path("foo/bar").getParent());
  assertEquals(new Path("/"), new Path("/foo").getParent());
  assertEquals(null, new Path("/").getParent());
  if (Path.WINDOWS) {
    // A drive root ("c:/") behaves like "/" on Windows.
    assertEquals(new Path("c:/"), new Path("c:/foo").getParent());
  }
}

EqualityVerifier 
/**
 * Tests that Windows paths are correctly handled: backslashes and a leading
 * slash before the drive letter normalize to "c:/…" form.
 * (Fixed: assertEquals arguments were reversed — expected value now first so
 * failure messages read correctly.)
 */
@Test(timeout=5000)
public void testWindowsPaths() throws URISyntaxException, IOException {
  if (!Path.WINDOWS) {
    return;  // Windows-only semantics
  }
  assertEquals("c:/foo/bar", new Path("c:\\foo\\bar").toString());
  assertEquals("c:/foo/bar", new Path("c:/foo/bar").toString());
  assertEquals("c:/foo/bar", new Path("/c:/foo/bar").toString());
  assertEquals("file://c:/foo/bar", new Path("file://c:/foo/bar").toString());
}

EqualityVerifier 
/**
 * Tests reserved characters in URIs (and therefore Paths): raw (percent
 * encoded) vs. decoded forms of space, ';', '+', and '?' across URI, URL,
 * and Path conversions.
 */
@Test(timeout=30000)
public void testReservedCharacters() throws URISyntaxException, IOException {
  // Space: raw path is percent-encoded, decoded path is not.
  assertEquals("/foo%20bar", new URI(null, null, "/foo bar", null, null).getRawPath());
  assertEquals("/foo bar", new URI(null, null, "/foo bar", null, null).getPath());
  assertEquals("/foo%20bar", new URI(null, null, "/foo bar", null, null).toString());
  assertEquals("/foo%20bar", new Path("/foo bar").toUri().toString());
  // ';' and '+' are legal in both raw and decoded forms.
  assertEquals("/foo;bar", new URI("/foo;bar").getPath());
  assertEquals("/foo;bar", new URI("/foo;bar").getRawPath());
  assertEquals("/foo+bar", new URI("/foo+bar").getPath());
  assertEquals("/foo+bar", new URI("/foo+bar").getRawPath());
  // URL conversion re-encodes the path.
  assertEquals("/foo bar", new Path("http://localhost/foo bar").toUri().getPath());
  assertEquals("/foo%20bar", new Path("http://localhost/foo bar").toUri().toURL().getPath());
  assertEquals("/foo?bar", new URI("http", "localhost", "/foo?bar", null, null).getPath());
  assertEquals("/foo%3Fbar", new URI("http", "localhost", "/foo?bar", null, null).toURL().getPath());
}

BranchVerifier EqualityVerifier 
/**
 * Parent/child joining via the two-arg Path constructor: "." is a no-op,
 * an absolute child replaces the parent, and relative children append.
 */
@Test(timeout=30000)
public void testChild() {
  assertEquals(new Path("."), new Path(".", "."));
  assertEquals(new Path("/"), new Path("/", "."));
  assertEquals(new Path("/"), new Path(".", "/"));
  assertEquals(new Path("/foo"), new Path("/", "foo"));
  assertEquals(new Path("/foo/bar"), new Path("/foo", "bar"));
  assertEquals(new Path("/foo/bar/baz"), new Path("/foo/bar", "baz"));
  assertEquals(new Path("/foo/bar/baz"), new Path("/foo", "bar/baz"));
  assertEquals(new Path("foo"), new Path(".", "foo"));
  assertEquals(new Path("foo/bar"), new Path("foo", "bar"));
  assertEquals(new Path("foo/bar/baz"), new Path("foo", "bar/baz"));
  assertEquals(new Path("foo/bar/baz"), new Path("foo/bar", "baz"));
  // An absolute child wins over the parent.
  assertEquals(new Path("/foo"), new Path("/bar", "/foo"));
  if (Path.WINDOWS) {
    // A drive-qualified child also wins over the parent.
    assertEquals(new Path("c:/foo"), new Path("/bar", "c:/foo"));
    assertEquals(new Path("c:/foo"), new Path("d:/bar", "c:/foo"));
  }
}

EqualityVerifier 
/**
 * makeQualified(): a relative path takes its authority from the default URI
 * when the working directory is a bare path, and from the working directory
 * when that is itself fully qualified.
 */
@Test(timeout=30000)
public void testMakeQualified() throws URISyntaxException {
  final URI defaultUri = new URI("hdfs://host1/dir1");
  final URI wd = new URI("hdfs://host2/dir2");
  // Bare working dir: scheme/authority come from defaultUri.
  final Path fromDefault = new Path("file").makeQualified(defaultUri, new Path("/dir"));
  assertEquals(new Path("hdfs://host1/dir/file"), fromDefault);
  // Fully-qualified working dir: it supplies scheme, authority, and dir.
  final Path fromWd = new Path("file").makeQualified(defaultUri, new Path(wd));
  assertEquals(new Path("hdfs://host2/dir2/file"), fromWd);
}

EqualityVerifier 
/**
 * Path.mergePaths() concatenates two path components, taking scheme and
 * authority from the first argument. On Windows a drive specifier in the
 * second path is treated specially.
 */
@Test(timeout=30000)
public void testMergePaths() {
  assertEquals(new Path("/foo/bar"),
      Path.mergePaths(new Path("/foo"), new Path("/bar")));
  assertEquals(new Path("/foo/bar/baz"),
      Path.mergePaths(new Path("/foo/bar"), new Path("/baz")));
  assertEquals(new Path("/foo/bar/baz"),
      Path.mergePaths(new Path("/foo"), new Path("/bar/baz")));
  // Drive-letter handling differs per platform.
  assertEquals(new Path(Shell.WINDOWS ? "/C:/foo/bar" : "/C:/foo/C:/bar"),
      Path.mergePaths(new Path("/C:/foo"), new Path("/C:/bar")));
  assertEquals(new Path(Shell.WINDOWS ? "/C:/bar" : "/C:/C:/bar"),
      Path.mergePaths(new Path("/C:/"), new Path("/C:/bar")));
  assertEquals(new Path("/bar"),
      Path.mergePaths(new Path("/"), new Path("/bar")));
  // Scheme and authority of the second path are discarded.
  assertEquals(new Path("viewfs:///foo/bar"),
      Path.mergePaths(new Path("viewfs:///foo"), new Path("file:///bar")));
  assertEquals(new Path("viewfs://vfsauthority/foo/bar"),
      Path.mergePaths(new Path("viewfs://vfsauthority/foo"),
          new Path("file://fileauthority/bar")));
}

EqualityVerifier 
/**
 * A fully-qualified child must win over the parent it is resolved against.
 */
@Test(timeout=30000)
public void testChildParentResolution() throws URISyntaxException, IOException {
  final Path parent = new Path("foo1://bar1/baz1");
  final Path child = new Path("foo2://bar2/baz2");
  final Path resolved = new Path(parent, child);
  assertEquals(child, resolved);
}

BranchVerifier EqualityVerifier 
/**
 * Three-arg Path(scheme, authority, path) constructor, including how a bare
 * "a:b" component is disambiguated from a scheme/drive on each platform.
 * NOTE(review): method name misspells "Constructor"; left as-is since
 * renaming would churn recorded test results.
 */
@Test(timeout=30000)
public void testPathThreeArgContructor() {
  assertEquals(new Path("foo"), new Path(null, null, "foo"));
  assertEquals(new Path("scheme:///foo"), new Path("scheme", null, "/foo"));
  assertEquals(new Path("scheme://authority/foo"),
      new Path("scheme", "authority", "/foo"));
  if (Path.WINDOWS) {
    // "c:" is a drive specifier on Windows.
    assertEquals(new Path("c:/foo/bar"), new Path(null, null, "c:/foo/bar"));
    assertEquals(new Path("c:/foo/bar"), new Path(null, null, "/c:/foo/bar"));
  } else {
    // Elsewhere "a:b" must be prefixed "./" so "a" is not read as a scheme.
    assertEquals(new Path("./a:b"), new Path(null, null, "a:b"));
  }
  if (Path.WINDOWS) {
    assertEquals(new Path("c:/foo/bar"),
        new Path("/fou", new Path(null, null, "c:/foo/bar")));
    assertEquals(new Path("c:/foo/bar"),
        new Path("/fou", new Path(null, null, "/c:/foo/bar")));
    assertEquals(new Path("/foo/bar"),
        new Path("/foo", new Path(null, null, "bar")));
  } else {
    assertEquals(new Path("/foo/bar/a:b"),
        new Path("/foo/bar", new Path(null, null, "a:b")));
    assertEquals(new Path("/a:b"),
        new Path("/foo/bar", new Path(null, null, "/a:b")));
  }
}

BranchVerifier EqualityVerifier 
/**
 * Path normalization: trailing and doubled slashes collapse, while a leading
 * "//authority" is preserved.
 * (Fixed: removed an exact duplicate of the "/foo/" assertion.)
 */
@Test(timeout=30000)
public void testNormalize() throws URISyntaxException {
  assertEquals("", new Path(".").toString());
  assertEquals("..", new Path("..").toString());
  assertEquals("/", new Path("/").toString());
  assertEquals("/", new Path("//").toString());
  assertEquals("/", new Path("///").toString());
  // "//foo" introduces an authority, not a doubled slash.
  assertEquals("//foo/", new Path("//foo/").toString());
  assertEquals("//foo/", new Path("//foo//").toString());
  assertEquals("//foo/bar", new Path("//foo//bar").toString());
  assertEquals("/foo", new Path("/foo/").toString());
  assertEquals("foo", new Path("foo/").toString());
  assertEquals("foo", new Path("foo//").toString());
  assertEquals("foo/bar", new Path("foo//bar").toString());
  assertEquals("hdfs://foo/foo2/bar/baz/",
      new Path(new URI("hdfs://foo//foo2///bar/baz///")).toString());
  if (Path.WINDOWS) {
    assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round trip between URI and Path, including fragments ("#baz"): the
 * fragment must survive qualification and parent/child resolution.
 */
@Test(timeout=30000)
public void testURI() throws URISyntaxException, IOException {
  URI uri = new URI("file:///bar#baz");
  Path path = new Path(uri);
  assertTrue(uri.equals(new URI(path.toString())));
  FileSystem fs = path.getFileSystem(new Configuration());
  assertTrue(uri.equals(new URI(fs.makeQualified(path).toString())));
  // Qualification without a fragment.
  URI uri2 = new URI("file:///bar/baz");
  assertTrue(uri2.equals(new URI(fs.makeQualified(new Path(uri2)).toString())));
  // The child's fragment is kept; the parent's fragment is dropped.
  assertEquals("foo://bar/baz#boo",
      new Path("foo://bar/", new Path(new URI("/baz#boo"))).toString());
  assertEquals("foo://bar/baz/fud#boo",
      new Path(new Path(new URI("foo://bar/baz#bud")),
          new Path(new URI("fud#boo"))).toString());
  assertEquals("foo://bar/fud#boo",
      new Path(new Path(new URI("foo://bar/baz#bud")),
          new Path(new URI("/fud#boo"))).toString());
}

EqualityVerifier 
/** Scheme and authority must be kept when joining an absolute child path. */
@Test(timeout=30000)
public void testScheme() throws java.io.IOException {
  final Path schemeOnly = new Path("foo:/", "/bar");
  assertEquals("foo:/bar", schemeOnly.toString());
  final Path withAuthority = new Path("foo://bar/", "/baz");
  assertEquals("foo://bar/baz", withAuthority.toString());
}

EqualityVerifier 
/**
 * "." and ".." handling in Path normalization and parent/child resolution.
 * (Fixed: assertEquals arguments were reversed throughout — expected value
 * now first so failure messages read correctly.)
 */
@Test(timeout=30000)
public void testDots() {
  // Normalization within a single path string.
  assertEquals("/foo/bar/baz", new Path("/foo/bar/baz").toString());
  assertEquals("/foo/bar", new Path("/foo/bar", ".").toString());
  assertEquals("/foo/baz", new Path("/foo/bar/../baz").toString());
  assertEquals("/foo/bar/baz", new Path("/foo/bar/./baz").toString());
  assertEquals("/foo/fud", new Path("/foo/bar/baz/../../fud").toString());
  assertEquals("/foo/fud", new Path("/foo/bar/baz/.././../fud").toString());
  assertEquals("../../foo/bar", new Path("../../foo/bar").toString());
  assertEquals("../../foo/bar", new Path(".././../foo/bar").toString());
  assertEquals("foo/bar/baz", new Path("./foo/bar/baz").toString());
  assertEquals("/baz/boo", new Path("/foo/bar/../../baz/boo").toString());
  assertEquals("foo/bar", new Path("foo/bar/").toString());
  assertEquals("foo/baz", new Path("foo/bar/../baz").toString());
  assertEquals("baz/boo", new Path("foo/bar/../../baz/boo").toString());
  // Normalization across the two-arg parent/child constructor.
  assertEquals("/foo/bar/baz/boo", new Path("/foo/bar", "baz/boo").toString());
  assertEquals("foo/bar/baz/bud", new Path("foo/bar/", "baz/bud").toString());
  assertEquals("/boo/bud", new Path("/foo/bar", "../../boo/bud").toString());
  assertEquals("boo/bud", new Path("foo/bar", "../../boo/bud").toString());
  assertEquals("boo/bud", new Path(".", "boo/bud").toString());
  assertEquals("/foo/boo/bud", new Path("/foo/bar/baz", "../../boo/bud").toString());
  assertEquals("foo/boo/bud", new Path("foo/bar/baz", "../../boo/bud").toString());
  // ".." components that climb past the parent accumulate.
  assertEquals("../../../../boo/bud", new Path("../../", "../../boo/bud").toString());
  assertEquals("../../../../boo/bud", new Path("../../foo", "../../../boo/bud").toString());
  assertEquals("../../foo/boo/bud", new Path("../../foo/bar", "../boo/bud").toString());
  assertEquals("", new Path("foo/bar/baz", "../../..").toString());
  assertEquals("../..", new Path("foo/bar/baz", "../../../../..").toString());
}

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * globStatus() with escaped glob characters: a literal file named "*" (and
 * "*&#47;f") must be reachable via the backslash-escaped pattern, while the
 * unescaped pattern globs normally. Skipped on Windows where '*' is illegal
 * in file names.
 */
@Test(timeout=30000)
public void testGlobEscapeStatus() throws Exception {
  if (Shell.WINDOWS) return;  // '*' cannot appear in Windows file names
  FileSystem lfs = FileSystem.getLocal(new Configuration());
  Path testRoot = lfs.makeQualified(
      new Path(System.getProperty("test.build.data", "test/build/data"),
          "testPathGlob"));
  lfs.delete(testRoot, true);
  lfs.mkdirs(testRoot);
  assertTrue(lfs.isDirectory(testRoot));
  lfs.setWorkingDirectory(testRoot);
  // One literal "*" directory plus two ordinary ones, each containing "f".
  Path paths[] = new Path[]{
      new Path(testRoot, "*/f"),
      new Path(testRoot, "d1/f"),
      new Path(testRoot, "d2/f")};
  Arrays.sort(paths);
  for (Path p : paths) {
    lfs.create(p).close();
    assertTrue(lfs.exists(p));
  }
  // listStatus treats "*" literally — only the "*" dir's child shows up.
  FileStatus stats[] = lfs.listStatus(new Path(testRoot, "*"));
  assertEquals(1, stats.length);
  assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
  // Unescaped glob matches all three directories.
  stats = lfs.globStatus(new Path(testRoot, "*"));
  Arrays.sort(stats);
  Path parentPaths[] = new Path[paths.length];
  for (int i = 0; i < paths.length; i++) {
    parentPaths[i] = paths[i].getParent();
  }
  assertEquals(mergeStatuses(parentPaths), mergeStatuses(stats));
  // Escaped "\*" matches only the literal "*" directory.
  stats = lfs.globStatus(new Path(testRoot, "\\*"));
  assertEquals(1, stats.length);
  assertEquals(new Path(testRoot, "*"), stats[0].getPath());
  stats = lfs.globStatus(new Path(testRoot, "*/f"));
  assertEquals(paths.length, stats.length);
  assertEquals(mergeStatuses(paths), mergeStatuses(stats));
  stats = lfs.globStatus(new Path(testRoot, "\\*/f"));
  assertEquals(1, stats.length);
  assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
  stats = lfs.globStatus(new Path(testRoot, "\\*/*"));
  assertEquals(1, stats.length);
  assertEquals(new Path(testRoot, "*/f"), stats[0].getPath());
}

Class: org.apache.hadoop.fs.TestResolveHdfsSymlink

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests resolution of an hdfs symlink whose target is on the local file
 * system: resolving a path through the link must report exactly the hdfs and
 * local AbstractFileSystems.
 * (Fixed: raw {@code Set} parameterized as {@code Set<AbstractFileSystem>}.)
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFcResolveAfs() throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  FileContext fcLocal = FileContext.getLocalFSFileContext();
  FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem().getUri());
  final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
  // A real local file that the hdfs link will eventually resolve to.
  Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri().toString(),
      new File(localTestRoot, "alpha").getAbsolutePath());
  DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16, (short) 1, 2);
  Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri().toString(),
      localTestRoot);
  Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
      "/tmp/link");
  fcHdfs.createSymlink(linkTarget, hdfsLink, true);
  Path alphaHdfsPathViaLink = new Path(
      fcHdfs.getDefaultFileSystem().getUri().toString() + "/tmp/link/alpha");
  Set<AbstractFileSystem> afsList =
      fcHdfs.resolveAbstractFileSystems(alphaHdfsPathViaLink);
  Assert.assertEquals(2, afsList.size());
  for (AbstractFileSystem afs : afsList) {
    if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
        && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
      Assert.fail("Failed to resolve AFS correctly");
    }
  }
}

Class: org.apache.hadoop.fs.TestStat

EqualityVerifier 
/** The stat helper must run with LANG=C so its output parsing is locale-stable. */
@Test(timeout=10000)
public void testStatEnvironment() throws Exception {
  final String lang = stat.getEnvironment("LANG");
  assertEquals("C", lang);
}

Class: org.apache.hadoop.fs.TestSymlinkHdfs

InternalCallVerifier EqualityVerifier 
/**
 * A symlink on the test filesystem may point at a file on the local
 * filesystem; reading through the link must see the target's contents.
 */
@Test(timeout=10000)
public void testLinkAcrossFileSystems() throws IOException {
  Path localDir = new Path("file://" + wrapper.getAbsoluteTestRootDir() + "/test");
  Path localFile = new Path("file://" + wrapper.getAbsoluteTestRootDir() + "/test/file");
  Path link = new Path(testBaseDir1(), "linkToFile");
  FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
  // Start from a clean local target directory.
  localWrapper.delete(localDir, true);
  localWrapper.mkdir(localDir, FileContext.DEFAULT_PERM, true);
  localWrapper.setWorkingDirectory(localDir);
  assertEquals(localDir, localWrapper.getWorkingDirectory());
  createAndWriteFile(localWrapper, localFile);
  wrapper.createSymlink(localFile, link, false);
  readFile(link);
  assertEquals(fileSize, wrapper.getFileStatus(link).getLen());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * setPermission()/setOwner() on a symlink must affect the link target (file
 * or directory), not the link itself; the link's own status stays unchanged.
 */
@Test(timeout=10000)
public void testSetPermissionAffectsTarget() throws IOException {
  Path file = new Path(testBaseDir1(), "file");
  Path dir = new Path(testBaseDir2());
  Path linkToFile = new Path(testBaseDir1(), "linkToFile");
  Path linkToDir = new Path(testBaseDir1(), "linkToDir");
  createAndWriteFile(file);
  wrapper.createSymlink(file, linkToFile, false);
  wrapper.createSymlink(dir, linkToDir, false);
  // File target: the link's own permission must be untouched…
  FsPermission perms = wrapper.getFileLinkStatus(linkToFile).getPermission();
  wrapper.setPermission(linkToFile, new FsPermission((short) 0664));
  wrapper.setOwner(linkToFile, "user", "group");
  assertEquals(perms, wrapper.getFileLinkStatus(linkToFile).getPermission());
  // …while the target picks up the new permission and ownership.
  FileStatus stat = wrapper.getFileStatus(file);
  assertEquals(0664, stat.getPermission().toShort());
  assertEquals("user", stat.getOwner());
  assertEquals("group", stat.getGroup());
  assertEquals(stat.getPermission(),
      wrapper.getFileStatus(linkToFile).getPermission());
  // Same check with a directory target.
  perms = wrapper.getFileLinkStatus(linkToDir).getPermission();
  wrapper.setPermission(linkToDir, new FsPermission((short) 0664));
  wrapper.setOwner(linkToDir, "user", "group");
  assertEquals(perms, wrapper.getFileLinkStatus(linkToDir).getPermission());
  stat = wrapper.getFileStatus(dir);
  assertEquals(0664, stat.getPermission().toShort());
  assertEquals("user", stat.getOwner());
  assertEquals("group", stat.getGroup());
  assertEquals(stat.getPermission(),
      wrapper.getFileStatus(linkToDir).getPermission());
}

EqualityVerifier 
/**
 * Creates a symlink via the WebHDFS client, then verifies setReplication()
 * through the link affects the target: link status reports 0, file and
 * link-resolved status report the new replication of 2.
 */
@Test(timeout=10000)
public void testWebHDFS() throws IOException {
  Path file = new Path(testBaseDir1(), "file");
  Path link = new Path(testBaseDir1(), "linkToFile");
  createAndWriteFile(file);
  // Link created through WebHDFS rather than the wrapper under test.
  webhdfs.createSymlink(file, link, false);
  wrapper.setReplication(link, (short) 2);
  assertEquals(0, wrapper.getFileLinkStatus(link).getReplication());
  assertEquals(2, wrapper.getFileStatus(link).getReplication());
  assertEquals(2, wrapper.getFileStatus(file).getReplication());
}

EqualityVerifier 
/**
 * setReplication() through a symlink must affect the target: link status
 * reports 0, while both the file and the link-resolved status report the
 * new replication of 2.
 */
@Test(timeout=10000)
public void testSetReplication() throws IOException {
  Path file = new Path(testBaseDir1(), "file");
  Path link = new Path(testBaseDir1(), "linkToFile");
  createAndWriteFile(file);
  wrapper.createSymlink(file, link, false);
  wrapper.setReplication(link, (short) 2);
  assertEquals(0, wrapper.getFileLinkStatus(link).getReplication());
  assertEquals(2, wrapper.getFileStatus(link).getReplication());
  assertEquals(2, wrapper.getFileStatus(file).getReplication());
}

BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * A symlink to "/" must resolve: a file is reachable through
 * &lt;dir&gt;/linkToSlash&lt;absolute path of file&gt;.
 */
@Test(timeout=10000)
public void testCreateLinkToSlash() throws IOException {
  Path dir = new Path(testBaseDir1());
  Path file = new Path(testBaseDir1(), "file");
  Path link = new Path(testBaseDir1(), "linkToSlash");
  // Going through the link to "/" re-enters the absolute path of the file.
  Path fileViaLink = new Path(testBaseDir1() + "/linkToSlash" + testBaseDir1()
      + "/file");
  createAndWriteFile(file);
  wrapper.setWorkingDirectory(dir);
  wrapper.createSymlink(new Path("/"), link, false);
  readFile(fileViaLink);
  assertEquals(fileSize, wrapper.getFileStatus(fileViaLink).getLen());
  if (wrapper instanceof FileContextTestWrapper) {
    // The fully-qualified form must resolve through the local wrapper too.
    FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
    Path linkQual = new Path(cluster.getURI(0).toString(), fileViaLink);
    assertEquals(fileSize, localWrapper.getFileStatus(linkQual).getLen());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A symlink whose full path is exactly MAX_PATH_LENGTH must succeed; one
 * character longer must be rejected with an IOException.
 */
@Test(timeout=10000)
public void testCreateLinkMaxPathLink() throws IOException {
  Path dir = new Path(testBaseDir1());
  Path file = new Path(testBaseDir1(), "file");
  final int maxPathLen = HdfsConstants.MAX_PATH_LENGTH;
  final int dirLen = dir.toString().length() + 1;  // +1 for the separator
  int len = maxPathLen - dirLen;
  // Build a link name of exactly `len` characters.
  StringBuilder sb = new StringBuilder("");
  for (int i = 0; i < (len / 10); i++) {
    sb.append("0123456789");
  }
  for (int i = 0; i < (len % 10); i++) {
    sb.append("x");
  }
  Path link = new Path(sb.toString());
  assertEquals(maxPathLen, dirLen + link.toString().length());
  createAndWriteFile(file);
  wrapper.setWorkingDirectory(dir);
  wrapper.createSymlink(file, link, false);
  readFile(link);
  // One character over the limit must be rejected.
  link = new Path(sb.toString() + "x");
  try {
    wrapper.createSymlink(file, link, false);
    fail("Path name should be too long");
  } catch (IOException x) {
    // expected
  }
}

APIUtilityVerifier EqualityVerifier 
/** A symlink resolved through getFileStatus() must report its target's owner. */
@Test(timeout=10000)
public void testLinkOwner() throws IOException {
  final Path target = new Path(testBaseDir1(), "file");
  final Path link = new Path(testBaseDir1(), "symlinkToFile");
  createAndWriteFile(target);
  wrapper.createSymlink(target, link, false);
  final String linkOwner = wrapper.getFileStatus(link).getOwner();
  final String targetOwner = wrapper.getFileStatus(target).getOwner();
  assertEquals(linkOwner, targetOwner);
}

Class: org.apache.hadoop.fs.TestSymlinkLocalFS

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * A symlink to a fully-qualified target keeps pointing at the original
 * absolute location: after renaming the containing directory the link
 * dangles, and a link to a different filesystem must be rejected.
 */
@Test(timeout=1000)
public void testGetLinkStatusPartQualTarget() throws IOException {
  assumeTrue(!emulatingSymlinksOnWindows());
  Path fileAbs = new Path(testBaseDir1() + "/file");
  Path fileQual = new Path(testURI().toString(), fileAbs);
  Path dir = new Path(testBaseDir1());
  Path link = new Path(testBaseDir1() + "/linkToFile");
  Path dirNew = new Path(testBaseDir2());
  Path linkNew = new Path(testBaseDir2() + "/linkToFile");
  wrapper.delete(dirNew, true);
  createAndWriteFile(fileQual);
  wrapper.setWorkingDirectory(dir);
  wrapper.createSymlink(fileQual, link, false);
  assertEquals(fileQual, wrapper.getFileLinkStatus(link).getSymlink());
  // Renaming the directory does not rewrite the link's absolute target.
  wrapper.rename(dir, dirNew);
  assertEquals(fileQual, wrapper.getFileLinkStatus(linkNew).getSymlink());
  try {
    readFile(linkNew);
    fail("The link should be dangling now.");
  } catch (FileNotFoundException x) {
    // expected
  }
  // A local-fs link may not target another filesystem.
  Path anotherFs = new Path("hdfs://host:1000/dir/file");
  FileUtil.fullyDelete(new File(linkNew.toString()));
  try {
    wrapper.createSymlink(anotherFs, linkNew, false);
    fail("Created a local fs link to a non-local fs");
  } catch (IOException x) {
    // expected
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * A dangling symlink: getFileStatus() and reads must fail with
 * FileNotFoundException, while getFileLinkStatus() still reports the link's
 * own metadata (target, owner, group, path); recreating the target makes
 * getFileStatus() succeed again.
 */
@Test(timeout=1000)
public void testDanglingLink() throws IOException {
  assumeTrue(!Path.WINDOWS);
  Path fileAbs = new Path(testBaseDir1() + "/file");
  Path fileQual = new Path(testURI().toString(), fileAbs);
  Path link = new Path(testBaseDir1() + "/linkToFile");
  Path linkQual = new Path(testURI().toString(), link.toString());
  wrapper.createSymlink(fileAbs, link, false);
  // Recreate the link without ever creating the target — it dangles.
  FileUtil.fullyDelete(new File(link.toUri().getPath()));
  wrapper.createSymlink(fileAbs, link, false);
  try {
    wrapper.getFileStatus(link);
    fail("Got FileStatus for dangling link");
  } catch (FileNotFoundException f) {
    // expected
  }
  // The link's own status must still be fully populated.
  UserGroupInformation user = UserGroupInformation.getCurrentUser();
  FileStatus fsd = wrapper.getFileLinkStatus(link);
  assertEquals(fileQual, fsd.getSymlink());
  assertTrue(fsd.isSymlink());
  assertFalse(fsd.isDirectory());
  assertEquals(user.getUserName(), fsd.getOwner());
  assertEquals(user.getGroupNames()[0], fsd.getGroup());
  assertEquals(linkQual, fsd.getPath());
  try {
    readFile(link);
    fail("Got FileStatus for dangling link");
  } catch (FileNotFoundException f) {
    // expected
  }
  // Once the target exists, resolving the link succeeds.
  createAndWriteFile(fileAbs);
  wrapper.getFileStatus(link);
}

Class: org.apache.hadoop.fs.TestUrlStreamHandler

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test opening and reading from an InputStream through a hdfs:// URL.
 *
 * First generates a file with known content through the FileSystem API, then
 * opens and reads the same bytes back through the java.net.URL stream API
 * after installing FsUrlStreamHandlerFactory.
 * NOTE(review): URL.setURLStreamHandlerFactory may be invoked at most once
 * per JVM — verify no other test in the suite installs a factory.
 * NOTE(review): os/is are closed only on the success path; an assertion
 * failure mid-test leaks them until the cluster shutdown in the finally.
 * @throws IOException
 */
@Test
public void testDfsUrls() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  // Route hdfs:// URLs through Hadoop's stream handler.
  FsUrlStreamHandlerFactory factory =
      new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
  java.net.URL.setURLStreamHandlerFactory(factory);
  Path filePath = new Path("/thefile");
  try {
    // Write 1024 bytes with a predictable pattern.
    byte[] fileContent = new byte[1024];
    for (int i = 0; i < fileContent.length; ++i)
      fileContent[i] = (byte) i;
    OutputStream os = fs.create(filePath);
    os.write(fileContent);
    os.close();
    // Read them back through an hdfs:// URL.
    URI uri = fs.getUri();
    URL fileURL = new URL(uri.getScheme(), uri.getHost(), uri.getPort(),
        filePath.toString());
    InputStream is = fileURL.openStream();
    assertNotNull(is);
    byte[] bytes = new byte[4096];
    assertEquals(1024, is.read(bytes));
    is.close();
    for (int i = 0; i < fileContent.length; ++i)
      assertEquals(fileContent[i], bytes[i]);
    fs.delete(filePath, false);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}


APIUtilityVerifier IterativeVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test opening and reading from an InputStream through a file:// URL:
 * writes a file with known content via the FileSystem API, then reads the
 * same bytes back through the java.net.URL stream API.
 * NOTE(review): os/is are closed only on the success path; an assertion
 * failure mid-test leaks them.
 * @throws IOException
 * @throws URISyntaxException
 */
@Test
public void testFileUrls() throws IOException, URISyntaxException {
  Configuration conf = new HdfsConfiguration();
  if (!TEST_ROOT_DIR.exists()) {
    if (!TEST_ROOT_DIR.mkdirs())
      throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR);
  }
  File tmpFile = new File(TEST_ROOT_DIR, "thefile");
  URI uri = tmpFile.toURI();
  FileSystem fs = FileSystem.get(uri, conf);
  try {
    // Write 1024 bytes with a predictable pattern.
    byte[] fileContent = new byte[1024];
    for (int i = 0; i < fileContent.length; ++i)
      fileContent[i] = (byte) i;
    OutputStream os = fs.create(new Path(uri.getPath()));
    os.write(fileContent);
    os.close();
    // Read them back through a file:// URL.
    URL fileURL = uri.toURL();
    InputStream is = fileURL.openStream();
    assertNotNull(is);
    byte[] bytes = new byte[4096];
    assertEquals(1024, is.read(bytes));
    is.close();
    for (int i = 0; i < fileContent.length; ++i)
      assertEquals(fileContent[i], bytes[i]);
    fs.delete(new Path(uri.getPath()), false);
  } finally {
    fs.close();
  }
}

Class: org.apache.hadoop.fs.TestXAttr

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** Verifies the equals() contract across the fixture XAttr instances. */
@Test
public void testXAttrEquals() {
  // The fixtures are distinct object instances.
  assertNotSame(XATTR1, XATTR2);
  assertNotSame(XATTR2, XATTR3);
  assertNotSame(XATTR3, XATTR4);
  assertNotSame(XATTR4, XATTR5);
  // Value equality: equal contents and reflexivity.
  assertEquals(XATTR, XATTR1);
  assertEquals(XATTR1, XATTR1);
  assertEquals(XATTR2, XATTR2);
  assertEquals(XATTR3, XATTR3);
  assertEquals(XATTR4, XATTR4);
  assertEquals(XATTR5, XATTR5);
  // Different contents must not compare equal.
  assertFalse(XATTR1.equals(XATTR2));
  assertFalse(XATTR2.equals(XATTR3));
  assertFalse(XATTR3.equals(XATTR4));
  assertFalse(XATTR4.equals(XATTR5));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Verifies hashCode() is consistent with equals() for the fixture XAttrs. */
@Test
public void testXAttrHashCode() {
  // Equal objects hash equally.
  assertEquals(XATTR.hashCode(), XATTR1.hashCode());
  // Unequal fixtures are expected to hash differently here.
  assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
  assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
  assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
  assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
}

Class: org.apache.hadoop.fs.azure.NativeAzureFileSystemBaseTest

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** setOwner on a directory should be reflected in its FileStatus. */
@Test
public void testSetOwnerOnFolder() throws Exception {
  Path folder = new Path("testOwner");
  assertTrue(fs.mkdirs(folder));
  fs.setOwner(folder, "newUser", null);
  FileStatus status = fs.getFileStatus(folder);
  assertNotNull(status);
  assertEquals("newUser", status.getOwner());
  assertTrue(status.isDirectory());
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** Opening a directory as a file must fail with FileNotFoundException. */
@Test
public void testReadingDirectoryAsFile() throws Exception {
  Path dir = new Path("/x");
  assertTrue(fs.mkdirs(dir));
  try {
    fs.open(dir).close();
    // Idiomatic replacement for assertTrue("Should've thrown", false).
    fail("Should've thrown");
  } catch (FileNotFoundException ex) {
    assertEquals("/x is a directory not a file.", ex.getMessage());
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** Creating a file over an existing directory must fail with IOException. */
@Test
public void testCreatingFileOverDirectory() throws Exception {
  Path dir = new Path("/x");
  assertTrue(fs.mkdirs(dir));
  try {
    fs.create(dir).close();
    // Idiomatic replacement for assertTrue("Should've thrown", false).
    fail("Should've thrown");
  } catch (IOException ex) {
    assertEquals("Cannot create file /x; already exists as a directory.",
        ex.getMessage());
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Round-trips file and folder names containing URI-reserved characters. */
@Test
public void testUriEncodingMoreComplexCharacters() throws Exception {
  String fileName = "!#$'()*;=[]%";
  String directoryName = "*;=[]%!#$'()";
  Path filePath = new Path(directoryName, fileName);
  fs.create(filePath).close();
  // The file should be visible via listing, getFileStatus, and open.
  FileStatus[] listing = fs.listStatus(new Path(directoryName));
  assertEquals(1, listing.length);
  assertEquals(fileName, listing[0].getPath().getName());
  FileStatus status = fs.getFileStatus(new Path(directoryName, fileName));
  assertEquals(fileName, status.getPath().getName());
  InputStream stream = fs.open(new Path(directoryName, fileName));
  assertNotNull(stream);
  stream.close();
  // Cleanup should succeed for both the file and the directory.
  assertTrue(fs.delete(new Path(directoryName, fileName), true));
  assertTrue(fs.delete(new Path(directoryName), true));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Bytes-read/bytes-written statistics track a write, read, and delete. */
@Test
public void testStatistics() throws Exception {
  FileSystem.clearStatistics();
  FileSystem.Statistics stats =
      FileSystem.getStatistics("wasb", NativeAzureFileSystem.class);
  assertEquals(0, stats.getBytesRead());
  assertEquals(0, stats.getBytesWritten());
  // Writing 8 bytes bumps only bytesWritten.
  Path newFile = new Path("testStats");
  writeString(newFile, "12345678");
  assertEquals(8, stats.getBytesWritten());
  assertEquals(0, stats.getBytesRead());
  // Reading the same data back bumps bytesRead.
  String readBack = readString(newFile);
  assertEquals("12345678", readBack);
  assertEquals(8, stats.getBytesRead());
  assertEquals(8, stats.getBytesWritten());
  // Deleting moves no data; counters stay put.
  assertTrue(fs.delete(newFile, true));
  assertEquals(8, stats.getBytesRead());
  assertEquals(8, stats.getBytesWritten());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Recursive delete of a folder removes the folder and its contents. */
@Test
public void testStoreDeleteFolder() throws Exception {
  Path testFolder = new Path("storeDeleteFolder");
  assertFalse(fs.exists(testFolder));
  assertTrue(fs.mkdirs(testFolder));
  assertTrue(fs.exists(testFolder));
  // Fresh directory: 0755 permissions, reported as a directory.
  FileStatus status = fs.getFileStatus(testFolder);
  assertNotNull(status);
  assertTrue(status.isDirectory());
  assertEquals(new FsPermission((short) 0755), status.getPermission());
  // Put a file inside, then delete the whole tree.
  Path innerFile = new Path(testFolder, "innerFile");
  assertTrue(fs.createNewFile(innerFile));
  assertTrue(fs.exists(innerFile));
  assertTrue(fs.delete(testFolder, true));
  assertFalse(fs.exists(innerFile));
  assertFalse(fs.exists(testFolder));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Stores a small file and reads its contents and default permission back. */
@Test
public void testStoreRetrieveFile() throws Exception {
  Path testFile = new Path("unit-test-file");
  writeString(testFile, "Testing");
  assertTrue(fs.exists(testFile));
  FileStatus status = fs.getFileStatus(testFile);
  assertNotNull(status);
  // New files default to 0644.
  assertEquals(new FsPermission((short) 0644), status.getPermission());
  assertEquals("Testing", readString(testFile));
  fs.delete(testFile, true);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A name containing a percent-escape sequence survives list and rename. */
@Test
public void testUriEncoding() throws Exception {
  fs.create(new Path("p/t%5Fe")).close();
  FileStatus[] listing = fs.listStatus(new Path("p"));
  assertEquals(1, listing.length);
  // "%5F" must be preserved literally, not decoded to "_".
  assertEquals("t%5Fe", listing[0].getPath().getName());
  assertTrue(fs.rename(new Path("p"), new Path("q")));
  assertTrue(fs.delete(new Path("q"), true));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Copies a file from the local file system into WASB and verifies it. */
@Test
public void testCopyFromLocalFileSystem() throws Exception {
  Path localFilePath = new Path(System.getProperty("test.build.data", "azure_test"));
  FileSystem localFs = FileSystem.get(new Configuration());
  localFs.delete(localFilePath, true);
  try {
    writeString(localFs, localFilePath, "Testing");
    Path dstPath = new Path("copiedFromLocal");
    assertTrue(FileUtil.copy(localFs, localFilePath, fs, dstPath, false, fs.getConf()));
    assertTrue(fs.exists(dstPath));
    assertEquals("Testing", readString(fs, dstPath));
    fs.delete(dstPath, true);
  } finally {
    // Always remove the local scratch file.
    localFs.delete(localFilePath, true);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** listStatus reports only direct children, distinguishing files/folders. */
@Test
public void testListDirectory() throws Exception {
  Path rootFolder = new Path("testingList");
  assertTrue(fs.mkdirs(rootFolder));
  // Empty directory lists nothing.
  FileStatus[] listed = fs.listStatus(rootFolder);
  assertEquals(0, listed.length);
  // One subdirectory: listed as a single directory entry.
  Path innerFolder = new Path(rootFolder, "inner");
  assertTrue(fs.mkdirs(innerFolder));
  listed = fs.listStatus(rootFolder);
  assertEquals(1, listed.length);
  assertTrue(listed[0].isDirectory());
  // A file inside the subfolder must not appear when listing the root.
  Path innerFile = new Path(innerFolder, "innerFile");
  writeString(innerFile, "testing");
  listed = fs.listStatus(rootFolder);
  assertEquals(1, listed.length);
  assertTrue(listed[0].isDirectory());
  // Listing the subfolder shows the file.
  listed = fs.listStatus(innerFolder);
  assertEquals(1, listed.length);
  assertFalse(listed[0].isDirectory());
  assertTrue(fs.delete(rootFolder, true));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** setOwner on a file updates owner and group independently. */
@Test
public void testSetOwnerOnFile() throws Exception {
  Path newFile = new Path("testOwner");
  OutputStream output = fs.create(newFile);
  output.write(13);
  output.close();
  // Change just the owner; group stays at its default.
  fs.setOwner(newFile, "newUser", null);
  FileStatus status = fs.getFileStatus(newFile);
  assertNotNull(status);
  assertEquals("newUser", status.getOwner());
  assertEquals("supergroup", status.getGroup());
  assertEquals(1, status.getLen());
  // Change just the group; owner is untouched.
  fs.setOwner(newFile, null, "newGroup");
  status = fs.getFileStatus(newFile);
  assertNotNull(status);
  assertEquals("newUser", status.getOwner());
  assertEquals("newGroup", status.getGroup());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** setPermission on a directory is reflected in its FileStatus. */
@Test
public void testSetPermissionOnFolder() throws Exception {
  Path folder = new Path("testPermission");
  assertTrue(fs.mkdirs(folder));
  FsPermission perm = new FsPermission((short) 0600);
  fs.setPermission(folder, perm);
  FileStatus status = fs.getFileStatus(folder);
  assertNotNull(status);
  assertEquals(perm, status.getPermission());
  assertTrue(status.isDirectory());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** setPermission on a file updates mode but leaves owner/group/length alone. */
@Test
public void testSetPermissionOnFile() throws Exception {
  Path newFile = new Path("testPermission");
  OutputStream output = fs.create(newFile);
  output.write(13);
  output.close();
  FsPermission perm = new FsPermission((short) 0700);
  fs.setPermission(newFile, perm);
  FileStatus status = fs.getFileStatus(newFile);
  assertNotNull(status);
  assertEquals(perm, status.getPermission());
  assertEquals("supergroup", status.getGroup());
  assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
      status.getOwner());
  assertEquals(1, status.getLen());
}

Class: org.apache.hadoop.fs.azure.TestAzureConcurrentOutOfBandIo

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Reads a blob repeatedly while another task writes to it out-of-band,
 * verifying the full content length is still observed.
 */
@Test
public void testReadOOBWrites() throws Exception {
  byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
  byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];
  // Seed the blob with NUMBER_OF_BLOCKS blocks of 0xFF.
  DataOutputStream outputStream = testAccount.getStore().storefile(
      "WASB_String.txt", new PermissionStatus("", "", FsPermission.getDefault()));
  Arrays.fill(dataBlockWrite, (byte) 255);
  for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
    outputStream.write(dataBlockWrite);
  }
  outputStream.flush();
  outputStream.close();
  // Start an out-of-band writer against the same blob.
  DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount, "WASB_String.txt");
  writeBlockTask.startWriting();
  int count = 0;
  for (int i = 0; i < 5; i++) {
    DataInputStream inputStream = null;
    try {
      inputStream = testAccount.getStore().retrieve("WASB_String.txt", 0);
      count = 0;
      int c = 0;
      while (c >= 0) {
        c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE);
        if (c < 0) {
          break;
        }
        count += c;
      }
    } catch (IOException e) {
      // Fix: print the exception itself; e.getCause().toString() NPEs
      // when the IOException carries no cause.
      e.printStackTrace();
      fail();
    } finally {
      // Fix: close in finally so the stream is released even if read throws.
      if (null != inputStream) {
        inputStream.close();
      }
    }
  }
  writeBlockTask.stopWriting();
  assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
}

Class: org.apache.hadoop.fs.azure.TestBlobMetadata

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB understands the old-style ASV metadata and changes it when
 * it gets the chance.
 */
@Test
public void testOldPermissionMetadata() throws Exception {
  Path selfishFile = new Path("/noOneElse");
  // Seed the mock store with an old "asv_permission" metadata key.
  HashMap metadata = new HashMap();
  metadata.put("asv_permission", getExpectedPermissionString("rw-------"));
  backingStore.setContent(
      AzureBlobStorageTestAccount.toMockUri(selfishFile), new byte[]{}, metadata);
  // The old-style permission must be read correctly.
  FsPermission justMe =
      new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
  // After a write operation the metadata migrates to "hdi_permission".
  FsPermission meAndYou =
      new FsPermission(FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.NONE);
  fs.setPermission(selfishFile, meAndYou);
  metadata = backingStore.getMetadata(
      AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-rw----"), storedPermission);
  assertNull(metadata.get("asv_permission"));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** mkdirs stamps folder-marker and permission metadata on the blob. */
@Test
public void testFolderMetadata() throws Exception {
  Path folder = new Path("/folder");
  FsPermission justRead =
      new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ);
  fs.mkdirs(folder, justRead);
  HashMap metadata =
      backingStore.getMetadata(AzureBlobStorageTestAccount.toMockUri(folder));
  assertNotNull(metadata);
  assertEquals("true", metadata.get("hdi_isfolder"));
  assertEquals(getExpectedPermissionString("r--r--r--"),
      metadata.get("hdi_permission"));
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** create() with explicit permission stores and round-trips hdi_permission. */
@SuppressWarnings("deprecation")
@Test
public void testPermissionMetadata() throws Exception {
  FsPermission justMe =
      new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
  Path selfishFile = new Path("/noOneElse");
  fs.create(selfishFile, justMe, true, 4096, fs.getDefaultReplication(),
      fs.getDefaultBlockSize(), null).close();
  // Metadata stored on the backing blob carries the permission string.
  HashMap metadata = backingStore.getMetadata(
      AzureBlobStorageTestAccount.toMockUri(selfishFile));
  assertNotNull(metadata);
  String storedPermission = metadata.get("hdi_permission");
  assertEquals(getExpectedPermissionString("rw-------"), storedPermission);
  // And FileStatus reads it back.
  FileStatus retrievedStatus = fs.getFileStatus(selfishFile);
  assertNotNull(retrievedStatus);
  assertEquals(justMe, retrievedStatus.getPermission());
  assertEquals(getExpectedOwner(), retrievedStatus.getOwner());
  assertEquals(NativeAzureFileSystem.AZURE_DEFAULT_GROUP_DEFAULT,
      retrievedStatus.getGroup());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB works well with an older version container with ASV-era
 * version and metadata.
 */
@Test
public void testFirstContainerVersionMetadata() throws Exception {
  // Pre-create a container tagged with the first (ASV-era) version key.
  HashMap containerMetadata = new HashMap();
  containerMetadata.put(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY,
      AzureNativeFileSystemStore.FIRST_WASB_VERSION);
  FsWithPreExistingContainer fsWithContainer =
      FsWithPreExistingContainer.create(containerMetadata);
  // Read-only operations must not touch the version metadata.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertEquals(AzureNativeFileSystemStore.FIRST_WASB_VERSION,
      fsWithContainer.getContainerMetadata()
          .get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata()
      .get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  // The first write upgrades to the current version key and drops the old one.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
      fsWithContainer.getContainerMetadata()
          .get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  assertNull(fsWithContainer.getContainerMetadata()
      .get(AzureNativeFileSystemStore.OLD_VERSION_METADATA_KEY));
  fsWithContainer.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB stamped the version in the container metadata if it does a
 * write operation to a pre-existing container.
 */
@Test
public void testPreExistingContainerVersionMetadata() throws Exception {
  FsWithPreExistingContainer fsWithContainer = FsWithPreExistingContainer.create();
  // Read-only operations leave the container metadata untouched.
  assertFalse(fsWithContainer.getFs().exists(new Path("/IDontExist")));
  assertEquals(0, fsWithContainer.getFs().listStatus(new Path("/")).length);
  assertNull(fsWithContainer.getContainerMetadata());
  // The first write stamps the current version.
  fsWithContainer.getFs().mkdirs(new Path("/dir"));
  assertNotNull(fsWithContainer.getContainerMetadata());
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
      fsWithContainer.getContainerMetadata()
          .get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
  fsWithContainer.close();
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that WASB stamped the version in the container metadata.
 */
@Test
public void testContainerVersionMetadata() throws Exception {
  // Any write operation is enough to trigger the stamping.
  fs.createNewFile(new Path("/foo"));
  HashMap containerMetadata = backingStore.getContainerMetadata();
  assertNotNull(containerMetadata);
  assertEquals(AzureNativeFileSystemStore.CURRENT_WASB_VERSION,
      containerMetadata.get(AzureNativeFileSystemStore.VERSION_METADATA_KEY));
}

Class: org.apache.hadoop.fs.azure.TestNativeAzureFileSystemBlockLocations

EqualityVerifier 
/** A file smaller than one block yields a single short location. */
@Test
public void testBlockLocationsSmallFile() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(1, 50, 0, 1);
  assertEquals(1, locations.length);
  assertEquals(1, locations[0].getLength());
}

EqualityVerifier 
/** A requested range beyond EOF produces no block locations. */
@Test
public void testBlockLocationsOutOfRangeSubsetOfFile() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(205, 10, 300, 10);
  assertEquals(0, locations.length);
}

EqualityVerifier 
/** Typical case: 210-byte file, 50-byte blocks => 4 full blocks + remainder. */
@Test
public void testBlockLocationsTypical() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(210, 50, 0, 210);
  assertEquals(5, locations.length);
  assertEquals("localhost", locations[0].getHosts()[0]);
  assertEquals(50, locations[0].getLength());
  // The last block holds the 10-byte remainder.
  assertEquals(10, locations[4].getLength());
  assertEquals(100, locations[2].getOffset());
}

EqualityVerifier 
/** A configured block-location host shows up in every returned location. */
@Test
public void testBlockLocationsDifferentLocationHost() throws Exception {
  BlockLocation[] locations =
      getBlockLocationsOutput(100, 10, 0, 100, "myblobhost");
  assertEquals(10, locations.length);
  assertEquals("myblobhost", locations[0].getHosts()[0]);
}

EqualityVerifier 
/** A zero-length requested range produces no block locations. */
@Test
public void testBlockLocationsEmptySubsetOfFile() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(205, 10, 0, 0);
  assertEquals(0, locations.length);
}

EqualityVerifier 
/** An empty file has no block locations at all. */
@Test
public void testBlockLocationsEmptyFile() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(0, 50, 0, 0);
  assertEquals(0, locations.length);
}

EqualityVerifier 
/** A file whose size is an exact block multiple has no short final block. */
@Test
public void testBlockLocationsExactBlockSizeMultiple() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(200, 50, 0, 200);
  assertEquals(4, locations.length);
  assertEquals(150, locations[3].getOffset());
  assertEquals(50, locations[3].getLength());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** The configured WASB block size is reported via FileStatus.getBlockSize. */
@Test
public void testNumberOfBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.set(NativeAzureFileSystem.AZURE_BLOCK_SIZE_PROPERTY_NAME, "500");
  AzureBlobStorageTestAccount testAccount =
      AzureBlobStorageTestAccount.createMock(conf);
  FileSystem fs = testAccount.getFileSystem();
  Path testFile = createTestFile(fs, 1200);
  FileStatus stat = fs.getFileStatus(testFile);
  assertEquals(500, stat.getBlockSize());
  testAccount.cleanup();
}

EqualityVerifier 
/** A mid-file range maps onto the overlapping blocks with clipped lengths. */
@Test
public void testBlockLocationsSubsetOfFile() throws Exception {
  BlockLocation[] locations = getBlockLocationsOutput(205, 10, 15, 35);
  assertEquals(4, locations.length);
  // First overlapping block starts at the request offset.
  assertEquals(10, locations[0].getLength());
  assertEquals(15, locations[0].getOffset());
  // Last overlapping block is clipped to the end of the range.
  assertEquals(5, locations[3].getLength());
  assertEquals(45, locations[3].getOffset());
}

Class: org.apache.hadoop.fs.azure.TestNativeAzureFileSystemConcurrency

InternalCallVerifier EqualityVerifier 
/**
 * Test to make sure that we don't expose the temporary upload folder when
 * listing at the root.
 */
@Test
public void testNoTempBlobsVisible() throws Exception {
  Path filePath = new Path("/inProgress");
  FSDataOutputStream outputStream = fs.create(filePath);
  try {
    // While the upload is in progress, only the real file should be listed.
    FileStatus[] listOfRoot = fs.listStatus(new Path("/"));
    assertEquals("Expected one file listed, instead got: " + toString(listOfRoot),
        1, listOfRoot.length);
    assertEquals(fs.makeQualified(filePath), listOfRoot[0].getPath());
  } finally {
    // Fix: close in finally so the stream isn't leaked when an assertion fails.
    outputStream.close();
  }
}

Class: org.apache.hadoop.fs.azure.TestOutOfBandAzureBlobOperations

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** A blob created out-of-band implies its parent folders when listing. */
@Test
public void testImplicitFolderListed() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  // Listing the blob path itself yields a single file entry.
  FileStatus[] obtained = fs.listStatus(new Path("/root/b"));
  assertNotNull(obtained);
  assertEquals(1, obtained.length);
  assertFalse(obtained[0].isDirectory());
  assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
  // Listing the implicit parent shows the same file.
  obtained = fs.listStatus(new Path("/root"));
  assertNotNull(obtained);
  assertEquals(1, obtained.length);
  assertFalse(obtained[0].isDirectory());
  assertEquals("/root/b", obtained[0].getPath().toUri().getPath());
  // The implicit parent itself reads as a directory.
  FileStatus dirStatus = fs.getFileStatus(new Path("/root"));
  assertNotNull(dirStatus);
  assertTrue(dirStatus.isDirectory());
  assertEquals("/root", dirStatus.getPath().toUri().getPath());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** setOwner works on a folder that exists only implicitly. */
@Test
public void testSetOwnerOnImplicitFolder() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  fs.setOwner(new Path("/root"), "newOwner", null);
  FileStatus status = fs.getFileStatus(new Path("/root"));
  assertNotNull(status);
  assertEquals("newOwner", status.getOwner());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** setPermission works on a folder that exists only implicitly. */
@Test
public void testSetPermissionOnImplicitFolder() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  FsPermission perm = new FsPermission((short) 0600);
  fs.setPermission(new Path("/root"), perm);
  FileStatus status = fs.getFileStatus(new Path("/root"));
  assertNotNull(status);
  assertEquals(perm, status.getPermission());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * When a blob is both a file and an implicit parent folder of another blob,
 * deleting the child must fail with a clear AzureException.
 */
@Test
public void testFileAndImplicitFolderSameName() throws Exception {
  createEmptyBlobOutOfBand("root/b");
  createEmptyBlobOutOfBand("root/b/c");
  FileStatus[] listResult = fs.listStatus(new Path("/root/b"));
  // File /root/b wins over the implicit folder in a listing.
  assertEquals(1, listResult.length);
  assertFalse(listResult[0].isDirectory());
  try {
    fs.delete(new Path("/root/b/c"), true);
    // Idiomatic replacement for assertTrue("Should've thrown.", false).
    fail("Should've thrown.");
  } catch (AzureException e) {
    assertEquals("File /root/b/c has a parent directory /root/b"
        + " which is also a file. Can't resolve.", e.getMessage());
  }
}

Class: org.apache.hadoop.fs.azure.TestShellDecryptionKeyProvider

InternalCallVerifier EqualityVerifier 
/** The shell decryption script's output is used as the account key (Windows only). */
@Test
public void testValidScript() throws Exception {
  // The cmd-script key provider only applies on Windows.
  if (!Shell.WINDOWS) {
    return;
  }
  String expectedResult = "decretedKey";
  // Create a script that echoes its first argument plus the marker string.
  File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd");
  FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult);
  ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider();
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "key1";
  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  conf.set(ShellDecryptionKeyProvider.KEY_ACCOUNT_SHELLKEYPROVIDER_SCRIPT,
      "cmd /c " + scriptFile.getAbsolutePath());
  String result = provider.getStorageAccountKey(account, conf);
  assertEquals(key + " " + expectedResult, result);
}

Class: org.apache.hadoop.fs.azure.TestWasbFsck

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that we delete dangling files properly.
 */
@Test
public void testDelete() throws Exception {
  Path danglingFile = new Path("/crashedInTheMiddle");
  // Deliberately leave the stream open: an unclosed, flushed stream is the
  // dangling temp blob this test exists to exercise.
  FSDataOutputStream stream = fs.create(danglingFile);
  stream.write(new byte[]{1, 2, 3});
  stream.flush();
  // The half-written file reports zero length and one temp blob exists.
  FileStatus fileStatus = fs.getFileStatus(danglingFile);
  assertNotNull(fileStatus);
  assertEquals(0, fileStatus.getLen());
  assertEquals(1, getNumTempBlobs());
  // fsck -delete cleans up the dangling temp blob and the file.
  runFsck("-delete");
  assertEquals(0, getNumTempBlobs());
  assertFalse(fs.exists(danglingFile));
}

Class: org.apache.hadoop.fs.azure.TestWasbUriAndConfiguration

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** With no provider configured, the plain configured key is returned. */
@Test
public void testDefaultKeyProvider() throws Exception {
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "testkey";
  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  String result =
      AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
  assertEquals(key, result);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** An explicitly configured SimpleKeyProvider resolves the account key. */
@Test
public void testValidKeyProvider() throws Exception {
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "testkey";
  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  conf.setClass("fs.azure.account.keyprovider." + account,
      SimpleKeyProvider.class, KeyProvider.class);
  String result =
      AzureNativeFileSystemStore.getAccountKeyFromConfiguration(account, conf);
  assertEquals(key, result);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A readonly SAS connection can still stat and read a blob. */
@Test
public void testConnectUsingSASReadonly() throws Exception {
  testAccount = AzureBlobStorageTestAccount.create("", EnumSet.of(
      CreateOptions.UseSas, CreateOptions.CreateContainer, CreateOptions.Readonly));
  assumeNotNull(testAccount);
  // Upload a 3-byte blob directly via the storage SDK.
  final String blobKey = "blobForReadonly";
  CloudBlobContainer container = testAccount.getRealContainer();
  CloudBlockBlob blob = container.getBlockBlobReference(blobKey);
  ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[]{1, 2, 3});
  blob.upload(inputStream, 3);
  inputStream.close();
  // Verify it is visible and readable through the (readonly) file system.
  Path filePath = new Path("/" + blobKey);
  FileSystem fs = testAccount.getFileSystem();
  assertTrue(fs.exists(filePath));
  byte[] obtained = new byte[3];
  DataInputStream obtainedInputStream = fs.open(filePath);
  obtainedInputStream.readFully(obtained);
  obtainedInputStream.close();
  assertEquals(3, obtained[2]);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the cases when the URI is specified with no authority, i.e.
 * wasb:///path/to/file.
 */
@Test
public void testNoUriAuthority() throws Exception {
  String[] wasbAliases = new String[]{"wasb", "wasbs"};
  // Every combination of default scheme and requested scheme should resolve
  // the missing authority from the default FS.
  for (String defaultScheme : wasbAliases) {
    for (String wantedScheme : wasbAliases) {
      testAccount = AzureBlobStorageTestAccount.createMock();
      Configuration conf = testAccount.getFileSystem().getConf();
      String authority = testAccount.getFileSystem().getUri().getAuthority();
      URI defaultUri = new URI(defaultScheme, authority, null, null, null);
      conf.set("fs.default.name", defaultUri.toString());
      URI wantedUri = new URI(wantedScheme + ":///random/path");
      NativeAzureFileSystem obtained =
          (NativeAzureFileSystem) FileSystem.get(wantedUri, conf);
      assertNotNull(obtained);
      assertEquals(new URI(wantedScheme, authority, null, null, null),
          obtained.getUri());
      Path qualified = obtained.makeQualified(new Path(wantedUri));
      assertEquals(new URI(wantedScheme, authority, wantedUri.getPath(), null, null),
          qualified.toUri());
      testAccount.cleanup();
      FileSystem.closeAll();
    }
  }
  // With a non-wasb default FS there is no authority to borrow: must throw.
  testAccount = AzureBlobStorageTestAccount.createMock();
  Configuration conf = testAccount.getFileSystem().getConf();
  conf.set("fs.default.name", "file:///");
  try {
    FileSystem.get(new URI("wasb:///random/path"), conf);
    fail("Should've thrown.");
  } catch (IllegalArgumentException e) {
  }
}

EqualityVerifier 
/** An anonymous connection can read a pre-existing public blob. */
@Test
public void testConnectUsingAnonymous() throws Exception {
  testAccount =
      AzureBlobStorageTestAccount.createAnonymous("testWasb.txt", FILE_SIZE);
  assumeNotNull(testAccount);
  assertEquals(FILE_SIZE, readInputStream(new Path("/testWasb.txt")));
}

Class: org.apache.hadoop.fs.azure.metrics.TestAzureFileSystemInstrumentation

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** mkdir and list operations move the web-response and directory counters. */
@Test
public void testMetricsOnMkdirList() throws Exception {
  long base = getBaseWebResponses();
  // Creating a directory takes between 1 and 12 web requests.
  assertTrue(fs.mkdirs(new Path("a")));
  base = assertWebResponsesInRange(base, 1, 12);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_DIRECTORIES_CREATED));
  // Listing the root takes exactly one web request.
  assertEquals(1, fs.listStatus(new Path("/")).length);
  base = assertWebResponsesEquals(base, 1);
  assertNoErrors();
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** A lease-conflict delete should register a client error, not a server error. */
@Test
public void testClientErrorMetrics() throws Exception {
  String directoryName = "metricsTestDirectory_ClientError";
  Path directoryPath = new Path("/" + directoryName);
  assertTrue(fs.mkdirs(directoryPath));
  // Take a lease so the delete below conflicts and fails.
  String leaseID = testAccount.acquireShortLease(directoryName);
  try {
    try {
      fs.delete(directoryPath, true);
      // Idiomatic replacement for assertTrue("Should've thrown.", false).
      fail("Should've thrown.");
    } catch (AzureException ex) {
      assertTrue("Unexpected exception: " + ex,
          ex.getMessage().contains("lease"));
    }
    assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
        getInstrumentation(), WASB_CLIENT_ERRORS));
    assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(
        getInstrumentation(), WASB_SERVER_ERRORS));
  } finally {
    testAccount.releaseLease(leaseID, directoryName);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** File creation and rename keep the web-response counters in expected ranges. */
@Test
public void testMetricsOnFileRename() throws Exception {
  long base = getBaseWebResponses();
  Path originalPath = new Path("/metricsTest_RenameStart");
  Path destinationPath = new Path("/metricsTest_RenameFinal");
  assertEquals(0, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_FILES_CREATED));
  // Creating an empty file: 2-20 web requests, one files-created tick.
  assertTrue(fs.createNewFile(originalPath));
  logOpResponseCount("Creating an empty file", base);
  base = assertWebResponsesInRange(base, 2, 20);
  assertEquals(1, AzureMetricsTestUtil.getLongCounterValue(
      getInstrumentation(), WASB_FILES_CREATED));
  // Renaming: 2-15 web requests.
  assertTrue(fs.rename(originalPath, destinationPath));
  logOpResponseCount("Renaming a file", base);
  base = assertWebResponsesInRange(base, 2, 15);
  assertNoErrors();
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testMetricsOnFileCreateRead() throws Exception { long base=getBaseWebResponses(); assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation())); Path filePath=new Path("/metricsTest_webResponses"); final int FILE_SIZE=1000; getBandwidthGaugeUpdater().suppressAutoUpdate(); Date start=new Date(); OutputStream outputStream=fs.create(filePath); outputStream.write(nonZeroByteArray(FILE_SIZE)); outputStream.close(); long uploadDurationMs=new Date().getTime() - start.getTime(); logOpResponseCount("Creating a 1K file",base); base=assertWebResponsesInRange(base,2,15); getBandwidthGaugeUpdater().triggerUpdate(true); long bytesWritten=AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation()); assertTrue("The bytes written in the last second " + bytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesWritten > (FILE_SIZE / 2) && bytesWritten < (FILE_SIZE * 2)); long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation()); assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2)); long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE); System.out.println("Upload rate: " + uploadRate + " bytes/second."); long expectedRate=(FILE_SIZE * 1000L) / uploadDurationMs; assertTrue("The upload rate " + uploadRate + " is below the expected range of around "+ expectedRate+ " bytes/second that the unit test observed. 
This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block upload time.",uploadRate >= expectedRate); long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY); System.out.println("Upload latency: " + uploadLatency); long expectedLatency=uploadDurationMs; assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0); assertTrue("The upload latency " + uploadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block upload time.",uploadLatency <= expectedLatency); start=new Date(); InputStream inputStream=fs.open(filePath); int count=0; while (inputStream.read() >= 0) { count++; } inputStream.close(); long downloadDurationMs=new Date().getTime() - start.getTime(); assertEquals(FILE_SIZE,count); logOpResponseCount("Reading a 1K file",base); base=assertWebResponsesInRange(base,1,10); getBandwidthGaugeUpdater().triggerUpdate(false); long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation()); assertEquals(FILE_SIZE,totalBytesRead); long bytesRead=AzureMetricsTestUtil.getCurrentBytesRead(getInstrumentation()); assertTrue("The bytes read in the last second " + bytesRead + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",bytesRead > (FILE_SIZE / 2) && bytesRead < (FILE_SIZE * 2)); long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE); System.out.println("Download rate: " + downloadRate + " bytes/second."); expectedRate=(FILE_SIZE * 1000L) / downloadDurationMs; assertTrue("The download rate " + downloadRate + " is below the expected range of around "+ expectedRate+ " bytes/second 
that the unit test observed. This should never be"+ " the case since the test underestimates the rate by looking at "+ " end-to-end time instead of just block download time.",downloadRate >= expectedRate); long downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY); System.out.println("Download latency: " + downloadLatency); expectedLatency=downloadDurationMs; assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0); assertTrue("The download latency " + downloadLatency + " is more than the expected range of around "+ expectedLatency+ " milliseconds that the unit test observed. This should never be"+ " the case since the test overestimates the latency by looking at "+ " end-to-end time instead of just block download time.",downloadLatency <= expectedLatency); assertNoErrors(); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Writes a 100 MB file and reads it back, checking that the WASB metrics
// (web responses, bytes written/read, upload/download rate and latency)
// move as expected for a large transfer.
// Auto-update of the bandwidth gauge is suppressed so triggerUpdate() gives
// deterministic readings; totalBytesWritten is allowed up to 2x FILE_SIZE
// to cover block-upload overhead.
// NOTE(review): latency/rate are only asserted to be positive here (unlike
// the small-file test above) — presumably because a 100 MB transfer's
// end-to-end timing is too variable for tight bounds; confirm.
@Test public void testMetricsOnBigFileCreateRead() throws Exception { long base=getBaseWebResponses(); assertEquals(0,AzureMetricsTestUtil.getCurrentBytesWritten(getInstrumentation())); Path filePath=new Path("/metricsTest_webResponses"); final int FILE_SIZE=100 * 1024 * 1024; getBandwidthGaugeUpdater().suppressAutoUpdate(); OutputStream outputStream=fs.create(filePath); outputStream.write(new byte[FILE_SIZE]); outputStream.close(); logOpResponseCount("Creating a 100 MB file",base); base=assertWebResponsesInRange(base,20,50); getBandwidthGaugeUpdater().triggerUpdate(true); long totalBytesWritten=AzureMetricsTestUtil.getCurrentTotalBytesWritten(getInstrumentation()); assertTrue("The total bytes written " + totalBytesWritten + " is pretty far from the expected range of around "+ FILE_SIZE+ " bytes plus a little overhead.",totalBytesWritten >= FILE_SIZE && totalBytesWritten < (FILE_SIZE * 2)); long uploadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_RATE); System.out.println("Upload rate: " + uploadRate + " bytes/second."); long uploadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_UPLOAD_LATENCY); System.out.println("Upload latency: " + uploadLatency); assertTrue("The upload latency " + uploadLatency + " should be greater than zero now that I've just uploaded a file.",uploadLatency > 0); InputStream inputStream=fs.open(filePath); int count=0; while (inputStream.read() >= 0) { count++; } inputStream.close(); assertEquals(FILE_SIZE,count); logOpResponseCount("Reading a 100 MB file",base); base=assertWebResponsesInRange(base,20,40); getBandwidthGaugeUpdater().triggerUpdate(false); long totalBytesRead=AzureMetricsTestUtil.getCurrentTotalBytesRead(getInstrumentation()); assertEquals(FILE_SIZE,totalBytesRead); long downloadRate=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_RATE); System.out.println("Download rate: " + downloadRate + " bytes/second."); long 
downloadLatency=AzureMetricsTestUtil.getLongGaugeValue(getInstrumentation(),WASB_DOWNLOAD_LATENCY); System.out.println("Download latency: " + downloadLatency); assertTrue("The download latency " + downloadLatency + " should be greater than zero now that I've just downloaded a file.",downloadLatency > 0); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises exists()/createNewFile()/delete() and checks the web-response
// counters stay within small expected ranges for each operation, plus that
// WASB_FILES_DELETED goes 0 -> 1 across the delete.
// The base counter is re-sampled after createNewFile so only the exists()
// and delete() calls are bounded by assertWebResponsesInRange.
@Test public void testMetricsOnFileExistsDelete() throws Exception { long base=getBaseWebResponses(); Path filePath=new Path("/metricsTest_delete"); assertFalse(fs.exists(filePath)); logOpResponseCount("Checking file existence for non-existent file",base); base=assertWebResponsesInRange(base,1,3); assertTrue(fs.createNewFile(filePath)); base=getCurrentWebResponses(); assertTrue(fs.exists(filePath)); logOpResponseCount("Checking file existence for existent file",base); base=assertWebResponsesInRange(base,1,2); assertEquals(0,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED)); assertTrue(fs.delete(filePath,false)); logOpResponseCount("Deleting a file",base); base=assertWebResponsesInRange(base,1,4); assertEquals(1,AzureMetricsTestUtil.getLongCounterValue(getInstrumentation(),WASB_FILES_DELETED)); assertNoErrors(); }

Class: org.apache.hadoop.fs.azure.metrics.TestBandwidthGaugeUpdater

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Single-threaded check of the bandwidth gauge: an instantaneous upload is
 * attributed wholly to the current window, and an upload spread over ten
 * seconds contributes roughly a tenth of its bytes per second.
 */
@Test
public void testSingleThreaded() throws Exception {
  AzureFileSystemInstrumentation metrics =
      new AzureFileSystemInstrumentation(new Configuration());
  BandwidthGaugeUpdater gaugeUpdater =
      new BandwidthGaugeUpdater(metrics, 1000, true);

  // Nothing recorded yet: the gauge reads zero.
  gaugeUpdater.triggerUpdate(true);
  assertEquals(0, AzureMetricsTestUtil.getCurrentBytesWritten(metrics));

  // A zero-duration 150-byte upload lands entirely in this window.
  gaugeUpdater.blockUploaded(new Date(), new Date(), 150);
  gaugeUpdater.triggerUpdate(true);
  assertEquals(150, AzureMetricsTestUtil.getCurrentBytesWritten(metrics));

  // 200 bytes over ~10 seconds should read as roughly 20 bytes/second.
  gaugeUpdater.blockUploaded(new Date(new Date().getTime() - 10000),
      new Date(), 200);
  gaugeUpdater.triggerUpdate(true);
  long observed = AzureMetricsTestUtil.getCurrentBytesWritten(metrics);
  assertTrue("We expect around (200/10 = 20) bytes written as the gauge value."
      + "Got " + observed, observed > 18 && observed < 22);

  gaugeUpdater.close();
}

EqualityVerifier 
/**
 * Concurrency check: ten threads each record one current download and one
 * stale (epoch-dated) download; only the current ones (10 bytes per thread)
 * should be counted by the gauge.
 */
@Test
public void testMultiThreaded() throws Exception {
  final AzureFileSystemInstrumentation metrics =
      new AzureFileSystemInstrumentation(new Configuration());
  final BandwidthGaugeUpdater gaugeUpdater =
      new BandwidthGaugeUpdater(metrics, 1000, true);

  Runnable recordDownloads = new Runnable() {
    @Override
    public void run() {
      gaugeUpdater.blockDownloaded(new Date(), new Date(), 10);
      gaugeUpdater.blockDownloaded(new Date(0), new Date(0), 10);
    }
  };

  Thread[] workers = new Thread[10];
  for (int i = 0; i < workers.length; i++) {
    workers[i] = new Thread(recordDownloads);
  }
  for (Thread worker : workers) {
    worker.start();
  }
  for (Thread worker : workers) {
    worker.join();
  }

  gaugeUpdater.triggerUpdate(false);
  assertEquals(10 * workers.length,
      AzureMetricsTestUtil.getCurrentBytesRead(metrics));
  gaugeUpdater.close();
}

Class: org.apache.hadoop.fs.azure.metrics.TestNativeAzureFileSystemMetricsSystem

InternalCallVerifier EqualityVerifier 
// Key behavior pinned here: per-filesystem file-creation counters remain 0
// while the filesystems are open, and are only published when each
// filesystem is closed (a1 shows 2, a2 shows 1 after closeFileSystem()).
// A third account created after the others verifies a fresh metrics record.
/** * Tests that when we have multiple file systems created/destroyed * metrics from each are published correctly. * @throws Exception */ @Test public void testMetricsAcrossFileSystems() throws Exception { AzureBlobStorageTestAccount a1, a2, a3; a1=AzureBlobStorageTestAccount.createMock(); assertEquals(0,getFilesCreated(a1)); a2=AzureBlobStorageTestAccount.createMock(); assertEquals(0,getFilesCreated(a2)); a1.getFileSystem().create(new Path("/foo")).close(); a1.getFileSystem().create(new Path("/bar")).close(); a2.getFileSystem().create(new Path("/baz")).close(); assertEquals(0,getFilesCreated(a1)); assertEquals(0,getFilesCreated(a2)); a1.closeFileSystem(); a2.closeFileSystem(); assertEquals(2,getFilesCreated(a1)); assertEquals(1,getFilesCreated(a2)); a3=AzureBlobStorageTestAccount.createMock(); assertEquals(0,getFilesCreated(a3)); a3.closeFileSystem(); assertEquals(0,getFilesCreated(a3)); }

Class: org.apache.hadoop.fs.azure.metrics.TestRollingWindowAverage

InternalCallVerifier EqualityVerifier 
/**
 * Tests the basic functionality of the class: points contribute to the
 * average only while they are inside the 100 ms rolling window.
 */
@Test
public void testBasicFunctionality() throws Exception {
  RollingWindowAverage window = new RollingWindowAverage(100);

  // Empty window reports zero.
  assertEquals(0, window.getCurrentAverage());

  window.addPoint(5);
  assertEquals(5, window.getCurrentAverage());

  // Add a second point 50 ms later: both are in the window, average = 10.
  Thread.sleep(50);
  window.addPoint(15);
  assertEquals(10, window.getCurrentAverage());

  // 60 ms more: the first point (age ~110 ms) has expired.
  Thread.sleep(60);
  assertEquals(15, window.getCurrentAverage());

  // 50 ms more: the second point has expired too.
  Thread.sleep(50);
  assertEquals(0, window.getCurrentAverage());
}

Class: org.apache.hadoop.fs.contract.AbstractContractOpenTest

APIUtilityVerifier EqualityVerifier 
/**
 * Opening a zero-byte file must give a stream positioned at 0 whose first
 * read reports EOF.
 */
@Test
public void testOpenReadZeroByteFile() throws Throwable {
  describe("create & read a 0 byte file");
  Path zeroFile = path("zero.txt");
  touch(getFileSystem(), zeroFile);
  instream = getFileSystem().open(zeroFile);
  assertEquals(0, instream.getPos());
  int firstByte = instream.read();
  assertMinusOne("initial byte read", firstByte);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies that two streams opened on the same file keep independent read
// positions: interleaved single-byte reads on each stream advance only that
// stream, and closing stream 1 does not disturb stream 2.
// NOTE(review): instream1.close() appears twice (once mid-test, once after
// the final read of instream2). The second call may be a deliberate
// idempotent-close check, or may have been meant as instream2.close() --
// confirm against the contract-test intent before changing.
@Test public void testOpenFileTwice() throws Throwable { describe("verify that two opened file streams are independent"); Path path=path("testopenfiletwice.txt"); byte[] block=dataset(TEST_FILE_LEN,0,255); createFile(getFileSystem(),path,false,block); FSDataInputStream instream1=getFileSystem().open(path); int c=instream1.read(); assertEquals(0,c); FSDataInputStream instream2=null; try { instream2=getFileSystem().open(path); assertEquals("first read of instream 2",0,instream2.read()); assertEquals("second read of instream 1",1,instream1.read()); instream1.close(); assertEquals("second read of instream 2",1,instream2.read()); instream1.close(); } finally { IOUtils.closeStream(instream1); IOUtils.closeStream(instream2); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Sequential read() calls must return the file's bytes in order, and EOF
 * (-1) must be sticky once the end is reached.
 */
@Test
public void testSequentialRead() throws Throwable {
  describe("verify that sequential read() operations return values");
  Path seqFile = path("testsequentialread.txt");
  int len = 4;
  int base = 0x40;
  byte[] data = dataset(len, base, base + len);
  createFile(getFileSystem(), seqFile, false, data);
  instream = getFileSystem().open(seqFile);

  // The four bytes come back in order ...
  for (int expected = base; expected < base + len; expected++) {
    assertEquals(expected, instream.read());
  }
  // ... then EOF, repeatedly.
  assertEquals(-1, instream.read());
  assertEquals(-1, instream.read());
  instream.close();
}

Class: org.apache.hadoop.fs.contract.AbstractContractSeekTest

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Seek round a file bigger than IO buffers.
 * Jumps forwards past typical buffer sizes and backwards within them,
 * checking each byte read against the generated dataset.
 * @throws Throwable
 */
@Test
public void testSeekBigFile() throws Throwable {
  describe("Seek round a large file and verify the bytes are what is expected");
  Path bigFile = path("bigseekfile.txt");
  byte[] data = dataset(65536, 0, 255);
  createFile(getFileSystem(), bigFile, false, data);
  instream = getFileSystem().open(bigFile);
  assertEquals(0, instream.getPos());

  // Sequential reads from the start.
  instream.seek(0);
  int firstByte = instream.read();
  assertEquals(0, firstByte);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  // Forward jump past the buffer, then another forward, then backward.
  instream.seek(32768);
  assertEquals("@32768", data[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", data[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", data[8191], (byte) instream.read());

  // Back to the very beginning.
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Seeking to two bytes before EOF must still allow both remaining bytes to
 * be read; the read after that must report EOF.
 */
@Test
public void testSeekAndReadPastEndOfFile() throws Throwable {
  describe("verify that reading past the last bytes in the file returns -1");
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());

  instream.seek(TEST_FILE_LEN - 2);
  // Both remaining bytes must come back as data.
  assertTrue("Premature EOF", instream.read() != -1);
  assertTrue("Premature EOF", instream.read() != -1);
  // Now we are past the end.
  assertMinusOne("read past end of file", instream.read());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A positioned bulk read (read(position, buf, off, len)) must not move the
 * stream's own position, and must fill the buffer from the given offset.
 */
@Test
public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  describe("verify that a positioned read does not change the getPos() value");
  Path bigFile = path("bigseekfile.txt");
  byte[] data = dataset(65536, 0, 255);
  createFile(getFileSystem(), bigFile, false, data);
  instream = getFileSystem().open(bigFile);

  // Establish a known position via seek + read.
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  assertEquals(40000, instream.getPos());

  // Positioned read at offset 128: getPos() must be untouched, and the
  // next sequential read still comes from offset 40000.
  byte[] buf = new byte[256];
  instream.read(128, buf, 0, buf.length);
  assertEquals(40000, instream.getPos());
  assertEquals("@40000", data[40000], (byte) instream.read());

  // The buffer must contain file bytes 128..383.
  for (int i = 0; i < 256; i++) {
    assertEquals("@" + i, data[i + 128], buf[i]);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Basic seek operations: position tracking across sequential reads,
 * a forward seek, and a backwards seek.
 */
@Test
public void testSeekFile() throws Throwable {
  describe("basic seek operations");
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());

  // Read forward from 0, verifying getPos() advances with each byte.
  instream.seek(0);
  int firstByte = instream.read();
  assertEquals(0, firstByte);
  assertEquals(1, instream.read());
  assertEquals(2, instream.getPos());
  assertEquals(2, instream.read());
  assertEquals(3, instream.getPos());

  // Forward seek ...
  instream.seek(128);
  assertEquals(128, instream.getPos());
  assertEquals(128, instream.read());
  // ... then a backwards seek.
  instream.seek(63);
  assertEquals(63, instream.read());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Seeking to a negative offset must fail (ideally with EOFException; other
 * IOExceptions are tolerated via handleRelaxedException) and must leave the
 * stream position unchanged.
 */
@Test
public void testNegativeSeek() throws Throwable {
  instream = getFileSystem().open(smallSeekFile);
  assertEquals(0, instream.getPos());
  try {
    instream.seek(-1);
    // Some filesystems only fail on the following operation — probe further.
    long p = instream.getPos();
    LOG.warn("Seek to -1 returned a position of " + p);
    int result = instream.read();
    fail("expected an exception, got data " + result + " at a position of " + p);
  } catch (EOFException e) {
    // The expected failure mode.
    handleExpectedException(e);
  } catch (IOException e) {
    // Accepted for filesystems with relaxed seek semantics.
    handleRelaxedException("a negative seek", "EOFException", e);
  }
  // The failed seek must not have moved the position.
  assertEquals(0, instream.getPos());
}

APIUtilityVerifier EqualityVerifier 
/**
 * On a zero-byte file, seek(0) is legal but every read — single byte or
 * buffered — must report EOF.
 */
@Test
public void testSeekZeroByteFile() throws Throwable {
  describe("seek and read a 0 byte file");
  instream = getFileSystem().open(zeroByteFile);
  assertEquals(0, instream.getPos());

  // First read hits EOF immediately.
  int r = instream.read();
  assertMinusOne("initial byte read", r);

  // Seeking back to 0 is allowed, but reads still report EOF ...
  byte[] oneByte = new byte[1];
  instream.seek(0);
  r = instream.read();
  assertMinusOne("post-seek byte read", r);

  // ... including bulk reads into a buffer.
  r = instream.read(oneByte, 0, 1);
  assertMinusOne("post-seek buffer read", r);
}

APIUtilityVerifier EqualityVerifier 
/**
 * A bulk (block) read on a zero-byte file must report EOF (-1),
 * not a zero-byte read.
 */
@Test
public void testBlockReadZeroByteFile() throws Throwable {
  describe("do a block read on a 0 byte file");
  instream = getFileSystem().open(zeroByteFile);
  assertEquals(0, instream.getPos());
  byte[] oneByte = new byte[1];
  int bytesRead = instream.read(oneByte, 0, 1);
  assertMinusOne("block read zero byte file", bytesRead);
}

Class: org.apache.hadoop.fs.contract.AbstractFSContractTestBase

TestInitializer InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Template-method setup for every contract test: the order matters —
// the contract must be created and init()ed before assumeEnabled() can
// consult it, and the filesystem/scheme checks guard against a
// misconfigured contract binding before any test data is created under
// testPath.
/** * Setup: create the contract then init it * @throws Exception on any failure */ @Before public void setup() throws Exception { contract=createContract(createConfiguration()); contract.init(); assumeEnabled(); fileSystem=contract.getTestFileSystem(); assertNotNull("null filesystem",fileSystem); URI fsURI=fileSystem.getUri(); LOG.info("Test filesystem = {} implemented by {}",fsURI,fileSystem); assertEquals("wrong filesystem of " + fsURI,contract.getScheme(),fsURI.getScheme()); testPath=getContract().getTestPath(); mkdirs(testPath); }

Class: org.apache.hadoop.fs.http.server.TestHttpFSServer

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test ACL operations on a directory, including default ACLs.
 * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
 * <ol>
 *   <li>Initial status with no ACLs</li>
 *   <li>The addition of a default ACL</li>
 *   <li>The removal of default ACLs</li>
 * </ol>
 * @throws Exception
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDirAcls() throws Exception {
  final String defUser1 = "default:user:glarch:r-x";
  final String defSpec1 = "aclspec=" + defUser1;
  final String dir = "/aclDirTest";
  String statusJson;
  List aclEntries;

  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));

  // 1. No ACLs yet: no aclBit in the file status, and an empty entry list.
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  // Fixed idiom: assertEquals reports the actual size on failure,
  // unlike the original assertTrue(size() == 0).
  Assert.assertEquals(0, aclEntries.size());

  // 2. Set a default ACL: aclBit appears, and five entries are reported
  // (the requested one plus the generated default entries).
  putCmd(dir, "SETACL", defSpec1);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(5, aclEntries.size());
  Assert.assertTrue(aclEntries.contains(defUser1));

  // 3. Remove the default ACLs: back to the initial no-ACL state.
  putCmd(dir, "REMOVEDEFAULTACL", null);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertEquals(0, aclEntries.size());
}

APIUtilityVerifier EqualityVerifier 
/**
 * LISTSTATUS with a glob filter ({@code filter=f*}) must return HTTP 200
 * for an authorized user.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testGlobFilter() throws Exception {
  createHttpFSServer(false);
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  fs.create(new Path("/tmp/foo.txt")).close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // Fixed: expected value (HTTP_OK) goes first so a failure message reads
  // "expected 200 but was <actual>", not the reverse.
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}

APIUtilityVerifier EqualityVerifier 
/**
 * A PUT request with no {@code op} parameter must be rejected with
 * HTTP 400 Bad Request.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testPutNoOperation() throws Exception {
  createHttpFSServer(false);
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setDoInput(true);
  conn.setDoOutput(true);
  conn.setRequestMethod("PUT");
  // Fixed: expected value first (JUnit assertEquals(expected, actual)).
  Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The instrumentation endpoint: unauthorized for "nobody" (401), returns a
 * JSON counters payload for a Hadoop user (200), and rejects the op on a
 * non-root path (400).
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void instrumentation() throws Exception {
  createHttpFSServer(false);

  // Unknown user is refused.
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // Fixed throughout: expected value first in assertEquals.
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());

  // A configured Hadoop user gets the counters JSON.
  url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
          HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()));
  String line = reader.readLine();
  reader.close();
  Assert.assertTrue(line.contains("\"counters\":{"));

  // The op is only valid on the root path.
  url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
          HadoopUsersConfTestHelper.getHadoopUsers()[0]));
  conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
}

APIUtilityVerifier EqualityVerifier 
/**
 * An OPEN request with {@code offset=1&length=2} on the 4-byte file
 * {0,1,2,3} must return exactly the bytes 1 and 2.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOpenOffsetLength() throws Exception {
  createHttpFSServer(false);
  byte[] array = new byte[]{0, 1, 2, 3};
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  OutputStream os = fs.create(new Path("/tmp/foo"));
  os.write(array);
  os.close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  // Fixed: the response stream was previously never closed (leak).
  InputStream is = conn.getInputStream();
  try {
    Assert.assertEquals(1, is.read());
    Assert.assertEquals(2, is.read());
    Assert.assertEquals(-1, is.read());
  } finally {
    is.close();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Full delegation-token lifecycle over HTTP, in strict order:
//  1. unauthenticated request -> 401;
//  2. signed auth cookie -> 200, then GETDELEGATIONTOKEN to mint a token;
//  3. the token alone authorizes GETHOMEDIRECTORY -> 200;
//  4. RENEWDELEGATIONTOKEN without the auth cookie -> 401, with it -> 200;
//  5. CANCELDELEGATIONTOKEN -> 200;
//  6. the cancelled token is rejected -> 403.
// The statement order is load-bearing (each step depends on server-side
// state from the previous one) — do not reorder.
@Test @TestDir @TestJetty @TestHdfs public void testDelegationTokenOperations() throws Exception { createHttpFSServer(true); URL url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY"); HttpURLConnection conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); AuthenticationToken token=new AuthenticationToken("u","p",new KerberosDelegationTokenAuthenticationHandler().getType()); token.setExpires(System.currentTimeMillis() + 100000000); Signer signer=new Signer(new StringSignerSecretProvider("secret")); String tokenSigned=signer.sign(token.toString()); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY"); conn=(HttpURLConnection)url.openConnection(); conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETDELEGATIONTOKEN"); conn=(HttpURLConnection)url.openConnection(); conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); JSONObject json=(JSONObject)new JSONParser().parse(new InputStreamReader(conn.getInputStream())); json=(JSONObject)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON); String tokenStr=(String)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); url=new 
URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); conn.setRequestProperty("Cookie",AuthenticatedURL.AUTH_COOKIE + "=" + tokenSigned); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode()); }

APIUtilityVerifier EqualityVerifier 
// Lifecycle under test: empty xattr map -> SETXATTR twice (two entries,
// values round-trip byte-for-byte) -> REMOVEXATTR of each in turn, with the
// map size checked after every step (2 -> 1 -> 0).
/** * Validate XAttr get/set/remove calls. */ @Test @TestDir @TestJetty @TestHdfs public void testXAttrs() throws Exception { final String name1="user.a1"; final byte[] value1=new byte[]{0x31,0x32,0x33}; final String name2="user.a2"; final byte[] value2=new byte[]{0x41,0x42,0x43}; final String dir="/xattrTest"; final String path=dir + "/file"; createHttpFSServer(false); FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf()); fs.mkdirs(new Path(dir)); createWithHttp(path,null); String statusJson=getStatus(path,"GETXATTRS"); Map xAttrs=getXAttrs(statusJson); Assert.assertEquals(0,xAttrs.size()); putCmd(path,"SETXATTR",setXAttrParam(name1,value1)); putCmd(path,"SETXATTR",setXAttrParam(name2,value2)); statusJson=getStatus(path,"GETXATTRS"); xAttrs=getXAttrs(statusJson); Assert.assertEquals(2,xAttrs.size()); Assert.assertArrayEquals(value1,xAttrs.get(name1)); Assert.assertArrayEquals(value2,xAttrs.get(name2)); putCmd(path,"REMOVEXATTR","xattr.name=" + name1); statusJson=getStatus(path,"GETXATTRS"); xAttrs=getXAttrs(statusJson); Assert.assertEquals(1,xAttrs.size()); Assert.assertArrayEquals(value2,xAttrs.get(name2)); putCmd(path,"REMOVEXATTR","xattr.name=" + name2); statusJson=getStatus(path,"GETXATTRS"); xAttrs=getXAttrs(statusJson); Assert.assertEquals(0,xAttrs.size()); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Validate the various ACL set/modify/remove calls. General strategy is
 * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
 * and GETACLSTATUS:
 * <ol>
 *   <li>Create a file with no ACLs</li>
 *   <li>Add a user + group ACL</li>
 *   <li>Add another user ACL</li>
 *   <li>Remove the first user ACL</li>
 *   <li>Remove all ACLs</li>
 * </ol>
 * Entry-count expectations: SETACL reports 2 named entries (the base
 * user::/other:: entries are not listed), MODIFYACLENTRIES grows it to 3,
 * REMOVEACLENTRIES shrinks it to 2, REMOVEACL empties it; aclBit presence
 * in GETFILESTATUS/LISTSTATUS tracks whether any ACL is set.
 */
@Test @TestDir @TestJetty @TestHdfs public void testFileAcls() throws Exception { final String aclUser1="user:foo:rw-"; final String aclUser2="user:bar:r--"; final String aclGroup1="group::r--"; final String aclSpec="aclspec=user::rwx," + aclUser1 + ","+ aclGroup1+ ",other::---"; final String modAclSpec="aclspec=" + aclUser2; final String remAclSpec="aclspec=" + aclUser1; final String dir="/aclFileTest"; final String path=dir + "/test"; String statusJson; List aclEntries; createHttpFSServer(false); FileSystem fs=FileSystem.get(TestHdfsHelper.getHdfsConf()); fs.mkdirs(new Path(dir)); createWithHttp(path,null); statusJson=getStatus(path,"GETFILESTATUS"); Assert.assertEquals(-1,statusJson.indexOf("aclBit")); statusJson=getStatus(dir,"LISTSTATUS"); Assert.assertEquals(-1,statusJson.indexOf("aclBit")); statusJson=getStatus(path,"GETACLSTATUS"); aclEntries=getAclEntries(statusJson); Assert.assertTrue(aclEntries.size() == 0); putCmd(path,"SETACL",aclSpec); statusJson=getStatus(path,"GETFILESTATUS"); Assert.assertNotEquals(-1,statusJson.indexOf("aclBit")); statusJson=getStatus(dir,"LISTSTATUS"); Assert.assertNotEquals(-1,statusJson.indexOf("aclBit")); statusJson=getStatus(path,"GETACLSTATUS"); aclEntries=getAclEntries(statusJson); Assert.assertTrue(aclEntries.size() == 2); Assert.assertTrue(aclEntries.contains(aclUser1)); Assert.assertTrue(aclEntries.contains(aclGroup1)); putCmd(path,"MODIFYACLENTRIES",modAclSpec); statusJson=getStatus(path,"GETACLSTATUS"); aclEntries=getAclEntries(statusJson); Assert.assertTrue(aclEntries.size() == 3); Assert.assertTrue(aclEntries.contains(aclUser1)); Assert.assertTrue(aclEntries.contains(aclUser2)); Assert.assertTrue(aclEntries.contains(aclGroup1)); putCmd(path,"REMOVEACLENTRIES",remAclSpec); statusJson=getStatus(path,"GETACLSTATUS"); aclEntries=getAclEntries(statusJson); Assert.assertTrue(aclEntries.size() == 2); Assert.assertTrue(aclEntries.contains(aclUser2)); Assert.assertTrue(aclEntries.contains(aclGroup1)); 
putCmd(path,"REMOVEACL",null); statusJson=getStatus(path,"GETACLSTATUS"); aclEntries=getAclEntries(statusJson); Assert.assertTrue(aclEntries.size() == 0); statusJson=getStatus(path,"GETFILESTATUS"); Assert.assertEquals(-1,statusJson.indexOf("aclBit")); statusJson=getStatus(dir,"LISTSTATUS"); Assert.assertEquals(-1,statusJson.indexOf("aclBit")); }

APIUtilityVerifier EqualityVerifier 
/**
 * A basic LISTSTATUS on the root path must return HTTP 200 for a
 * configured Hadoop user.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testHdfsAccess() throws Exception {
  createHttpFSServer(false);
  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
      MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // Fixed: expected value (HTTP_OK) first in assertEquals.
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  BufferedReader reader =
      new BufferedReader(new InputStreamReader(conn.getInputStream()));
  reader.readLine();
  reader.close();
}

Class: org.apache.hadoop.fs.http.server.TestHttpFSWithKerberos

InternalCallVerifier EqualityVerifier 
// A Kerberos-authenticated client (run inside KerberosTestUtils.doAsClient,
// which supplies the Kerberos login context) must get HTTP 200 from
// GETHOMEDIRECTORY; AuthenticatedURL performs the SPNEGO handshake.
@Test @TestDir @TestJetty @TestHdfs public void testValidHttpFSAccess() throws Exception { createHttpFSServer(); KerberosTestUtils.doAsClient(new Callable(){ @Override public Void call() throws Exception { URL url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY"); AuthenticatedURL aUrl=new AuthenticatedURL(); AuthenticatedURL.Token aToken=new AuthenticatedURL.Token(); HttpURLConnection conn=aUrl.openConnection(url,aToken); Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_OK); return null; } } ); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Delegation-token lifecycle via a Kerberos-authenticated client:
//  1. GETDELEGATIONTOKEN over SPNEGO mints a token (parsed from JSON);
//  2. the bare token authorizes GETHOMEDIRECTORY -> 200;
//  3. RENEWDELEGATIONTOKEN without SPNEGO -> 401, with SPNEGO -> 200;
//  4. CANCELDELEGATIONTOKEN (no SPNEGO needed) -> 200;
//  5. the cancelled token is rejected -> 401.
// Statement order is load-bearing: each step depends on server-side token
// state produced by the previous one.
@Test @TestDir @TestJetty @TestHdfs public void testDelegationTokenHttpFSAccess() throws Exception { createHttpFSServer(); KerberosTestUtils.doAsClient(new Callable(){ @Override public Void call() throws Exception { URL url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETDELEGATIONTOKEN"); AuthenticatedURL aUrl=new AuthenticatedURL(); AuthenticatedURL.Token aToken=new AuthenticatedURL.Token(); HttpURLConnection conn=aUrl.openConnection(url,aToken); Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_OK); JSONObject json=(JSONObject)new JSONParser().parse(new InputStreamReader(conn.getInputStream())); json=(JSONObject)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON); String tokenStr=(String)json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_OK); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_UNAUTHORIZED); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=RENEWDELEGATIONTOKEN&token=" + tokenStr); conn=aUrl.openConnection(url,aToken); conn.setRequestMethod("PUT"); Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_OK); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=CANCELDELEGATIONTOKEN&token=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_OK); url=new URL(TestJettyHelper.getJettyURL(),"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr); conn=(HttpURLConnection)url.openConnection(); 
Assert.assertEquals(conn.getResponseCode(),HttpURLConnection.HTTP_UNAUTHORIZED); return null; } } ); }

APIUtilityVerifier EqualityVerifier 
/**
 * A request without Kerberos credentials must be rejected with HTTP 401.
 * (The "Invalidad" typo in the method name is preserved: renaming would
 * change the test's external identifier.)
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testInvalidadHttpFSAccess() throws Exception {
  createHttpFSServer();
  URL url = new URL(TestJettyHelper.getJettyURL(), "/webhdfs/v1/?op=GETHOMEDIRECTORY");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  // Fixed: expected value (HTTP_UNAUTHORIZED) first in assertEquals.
  Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
}

Class: org.apache.hadoop.fs.loadGenerator.TestLoadGenerator

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test if the structure generator works fine: a valid invocation produces
 * the expected directory/file structure files, and each invalid option
 * value makes run() fail with -1.
 */
@Test
public void testStructureGenerator() throws Exception {
  StructureGenerator sg = new StructureGenerator();
  String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1",
      "-maxWidth", "2", "-numOfFiles", "2", "-avgFileSize", "1",
      "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"};
  // Index of the VALUE slot of each option in args.
  final int MAX_DEPTH = 1;
  final int MIN_WIDTH = 3;
  final int MAX_WIDTH = 5;
  final int NUM_OF_FILES = 7;
  final int AVG_FILE_SIZE = 9;
  final int SEED = 13;
  try {
    // Valid invocation: succeeds and writes both structure files.
    assertEquals(0, sg.run(args));
    BufferedReader in = new BufferedReader(new FileReader(DIR_STRUCTURE_FILE));
    assertEquals(DIR_STRUCTURE_FIRST_LINE, in.readLine());
    assertEquals(DIR_STRUCTURE_SECOND_LINE, in.readLine());
    assertEquals(null, in.readLine());
    in.close();
    in = new BufferedReader(new FileReader(FILE_STRUCTURE_FILE));
    assertEquals(FILE_STRUCTURE_FIRST_LINE, in.readLine());
    assertEquals(FILE_STRUCTURE_SECOND_LINE, in.readLine());
    assertEquals(null, in.readLine());
    in.close();

    // Invalid values for each option must be rejected; each probe restores
    // the original value before the next.
    String oldArg = args[MAX_DEPTH];
    args[MAX_DEPTH] = "0";
    assertEquals(-1, sg.run(args));
    args[MAX_DEPTH] = oldArg;

    oldArg = args[MIN_WIDTH];
    args[MIN_WIDTH] = "-1";
    assertEquals(-1, sg.run(args));
    args[MIN_WIDTH] = oldArg;

    oldArg = args[MAX_WIDTH];
    args[MAX_WIDTH] = "-1";
    assertEquals(-1, sg.run(args));
    args[MAX_WIDTH] = oldArg;

    // Fixed: the original ran this identical probe twice back-to-back.
    oldArg = args[NUM_OF_FILES];
    args[NUM_OF_FILES] = "-1";
    assertEquals(-1, sg.run(args));
    args[NUM_OF_FILES] = oldArg;

    oldArg = args[AVG_FILE_SIZE];
    args[AVG_FILE_SIZE] = "-1";
    assertEquals(-1, sg.run(args));
    args[AVG_FILE_SIZE] = oldArg;

    oldArg = args[SEED];
    args[SEED] = "34.d4"; // not parseable as a number
    assertEquals(-1, sg.run(args));
    args[SEED] = oldArg;
  } finally {
    DIR_STRUCTURE_FILE.delete();
    FILE_STRUCTURE_FILE.delete();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test if the load generator works fine: a valid run succeeds, each invalid
 * option value makes run() return -1, a well-formed script file runs, and a
 * malformed script line is rejected.
 */
@Test
public void testLoadGenerator() throws Exception {
  final String TEST_SPACE_ROOT = "/test";
  final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath();
  String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
  String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
  File scriptFile1 = new File(script);
  File scriptFile2 = new File(script2);

  // Write the structure files consumed by the DataGenerator.
  FileWriter writer = new FileWriter(DIR_STRUCTURE_FILE);
  writer.write(DIR_STRUCTURE_FIRST_LINE + "\n");
  writer.write(DIR_STRUCTURE_SECOND_LINE + "\n");
  writer.close();
  writer = new FileWriter(FILE_STRUCTURE_FILE);
  writer.write(FILE_STRUCTURE_FIRST_LINE + "\n");
  writer.write(FILE_STRUCTURE_SECOND_LINE + "\n");
  writer.close();

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
  cluster.waitActive();
  try {
    DataGenerator dg = new DataGenerator();
    dg.setConf(CONF);
    String[] args = new String[]{"-inDir", OUT_DIR.getAbsolutePath(),
        "-root", TEST_SPACE_ROOT};
    assertEquals(0, dg.run(args));

    // Index of the VALUE slot of each option in the args array below.
    final int READ_PROBABILITY = 1;
    final int WRITE_PROBABILITY = 3;
    final int MAX_DELAY_BETWEEN_OPS = 7;
    final int NUM_OF_THREADS = 9;
    final int START_TIME = 11;
    final int ELAPSED_TIME = 13;

    LoadGenerator lg = new LoadGenerator();
    lg.setConf(CONF);
    args = new String[]{"-readProbability", "0.3", "-writeProbability", "0.3",
        "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
        "-numOfThreads", "1", "-startTime", Long.toString(Time.now()),
        "-elapsedTime", "10"};
    assertEquals(0, lg.run(args));

    // Out-of-range read probabilities.
    String oldArg = args[READ_PROBABILITY];
    args[READ_PROBABILITY] = "1.1";
    assertEquals(-1, lg.run(args));
    args[READ_PROBABILITY] = "-1.1";
    assertEquals(-1, lg.run(args));
    args[READ_PROBABILITY] = oldArg;

    // Out-of-range write probabilities, plus read + write > 1.
    oldArg = args[WRITE_PROBABILITY];
    args[WRITE_PROBABILITY] = "1.1";
    assertEquals(-1, lg.run(args));
    args[WRITE_PROBABILITY] = "-1.1";
    assertEquals(-1, lg.run(args));
    args[WRITE_PROBABILITY] = "0.9"; // 0.3 + 0.9 > 1
    assertEquals(-1, lg.run(args));
    // Fixed: the original restored args[READ_PROBABILITY] here, leaving
    // WRITE_PROBABILITY stuck at the invalid "0.9" for all later probes.
    args[WRITE_PROBABILITY] = oldArg;

    // Non-integer delay. (The original ran this identical probe twice.)
    oldArg = args[MAX_DELAY_BETWEEN_OPS];
    args[MAX_DELAY_BETWEEN_OPS] = "1.x1";
    assertEquals(-1, lg.run(args));
    args[MAX_DELAY_BETWEEN_OPS] = oldArg;

    oldArg = args[NUM_OF_THREADS];
    args[NUM_OF_THREADS] = "-1";
    assertEquals(-1, lg.run(args));
    args[NUM_OF_THREADS] = oldArg;

    oldArg = args[START_TIME];
    args[START_TIME] = "-1";
    assertEquals(-1, lg.run(args));
    args[START_TIME] = oldArg;

    oldArg = args[ELAPSED_TIME];
    args[ELAPSED_TIME] = "-1";
    assertEquals(-1, lg.run(args));
    args[ELAPSED_TIME] = oldArg;

    // A well-formed script file runs successfully.
    FileWriter fw = new FileWriter(scriptFile1);
    fw.write("2 .22 .33\n");
    fw.write("3 .10 .6\n");
    fw.write("6 0 .7\n");
    fw.close();
    String[] scriptArgs = new String[]{"-root", TEST_SPACE_ROOT,
        "-maxDelayBetweenOps", "0", "-numOfThreads", "10",
        "-startTime", Long.toString(Time.now()), "-scriptFile", script};
    assertEquals(0, lg.run(scriptArgs));

    // A malformed script line must be rejected.
    fw = new FileWriter(scriptFile2);
    fw.write("2 .22 .33\n");
    fw.write("3 blah blah blah .6\n");
    fw.write("6 0 .7\n");
    fw.close();
    scriptArgs[scriptArgs.length - 1] = script2;
    assertEquals(-1, lg.run(scriptArgs));
  } finally {
    cluster.shutdown();
    DIR_STRUCTURE_FILE.delete();
    FILE_STRUCTURE_FILE.delete();
    scriptFile1.delete();
    scriptFile2.delete();
  }
}

Class: org.apache.hadoop.fs.permission.TestAcl

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@Test public void testEntryEquals(){ assertNotSame(ENTRY1,ENTRY2); assertNotSame(ENTRY1,ENTRY3); assertNotSame(ENTRY1,ENTRY4); assertNotSame(ENTRY2,ENTRY3); assertNotSame(ENTRY2,ENTRY4); assertNotSame(ENTRY3,ENTRY4); assertEquals(ENTRY1,ENTRY1); assertEquals(ENTRY2,ENTRY2); assertEquals(ENTRY1,ENTRY2); assertEquals(ENTRY2,ENTRY1); assertFalse(ENTRY1.equals(ENTRY3)); assertFalse(ENTRY1.equals(ENTRY4)); assertFalse(ENTRY3.equals(ENTRY4)); assertFalse(ENTRY1.equals(null)); assertFalse(ENTRY1.equals(new Object())); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testStatusHashCode(){ assertEquals(STATUS1.hashCode(),STATUS2.hashCode()); assertFalse(STATUS1.hashCode() == STATUS3.hashCode()); }

EqualityVerifier 
@Test public void testEntryScopeIsAccessIfUnspecified(){ assertEquals(AclEntryScope.ACCESS,ENTRY1.getScope()); assertEquals(AclEntryScope.ACCESS,ENTRY2.getScope()); assertEquals(AclEntryScope.ACCESS,ENTRY3.getScope()); assertEquals(AclEntryScope.DEFAULT,ENTRY4.getScope()); }

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
@Test public void testStatusEquals(){ assertNotSame(STATUS1,STATUS2); assertNotSame(STATUS1,STATUS3); assertNotSame(STATUS2,STATUS3); assertEquals(STATUS1,STATUS1); assertEquals(STATUS2,STATUS2); assertEquals(STATUS1,STATUS2); assertEquals(STATUS2,STATUS1); assertFalse(STATUS1.equals(STATUS3)); assertFalse(STATUS2.equals(STATUS3)); assertFalse(STATUS1.equals(null)); assertFalse(STATUS1.equals(new Object())); }

EqualityVerifier 
@Test public void testToString(){ assertEquals("user:user1:rwx",ENTRY1.toString()); assertEquals("user:user1:rwx",ENTRY2.toString()); assertEquals("group:group2:rw-",ENTRY3.toString()); assertEquals("default:other::---",ENTRY4.toString()); assertEquals("owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",STATUS1.toString()); assertEquals("owner: owner1, group: group1, acl: {entries: [user:user1:rwx, group:group2:rw-, default:other::---], stickyBit: false}",STATUS2.toString()); assertEquals("owner: owner2, group: group2, acl: {entries: [], stickyBit: true}",STATUS3.toString()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testEntryHashCode(){ assertEquals(ENTRY1.hashCode(),ENTRY2.hashCode()); assertFalse(ENTRY1.hashCode() == ENTRY3.hashCode()); assertFalse(ENTRY1.hashCode() == ENTRY4.hashCode()); assertFalse(ENTRY3.hashCode() == ENTRY4.hashCode()); }

Class: org.apache.hadoop.fs.shell.TestAclCommands

InternalCallVerifier EqualityVerifier 
@Test public void testLsNoRpcForGetAclStatus() throws Exception { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"stubfs:///"); conf.setClass("fs.stubfs.impl",StubFileSystem.class,FileSystem.class); conf.setBoolean("stubfs.noRpcForGetAclStatus",true); assertEquals("ls must succeed even if getAclStatus RPC does not exist.",0,ToolRunner.run(conf,new FsShell(),new String[]{"-ls","/"})); }

EqualityVerifier 
@Test public void testMultipleAclSpecParsing() throws Exception { List parsedList=AclEntry.parseAclSpec("group::rwx,user:user1:rwx,user:user2:rw-," + "group:group1:rw-,default:group:group1:rw-",true); AclEntry basicAcl=new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.ALL).build(); AclEntry user1Acl=new AclEntry.Builder().setType(AclEntryType.USER).setPermission(FsAction.ALL).setName("user1").build(); AclEntry user2Acl=new AclEntry.Builder().setType(AclEntryType.USER).setPermission(FsAction.READ_WRITE).setName("user2").build(); AclEntry group1Acl=new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.READ_WRITE).setName("group1").build(); AclEntry defaultAcl=new AclEntry.Builder().setType(AclEntryType.GROUP).setPermission(FsAction.READ_WRITE).setName("group1").setScope(AclEntryScope.DEFAULT).build(); List expectedList=new ArrayList(); expectedList.add(basicAcl); expectedList.add(user1Acl); expectedList.add(user2Acl); expectedList.add(group1Acl); expectedList.add(defaultAcl); assertEquals("Parsed Acl not correct",expectedList,parsedList); }

EqualityVerifier 
@Test public void testMultipleAclSpecParsingWithoutPermissions() throws Exception { List parsedList=AclEntry.parseAclSpec("user::,user:user1:,group::,group:group1:,mask::,other::," + "default:user:user1::,default:mask::",false); AclEntry owner=new AclEntry.Builder().setType(AclEntryType.USER).build(); AclEntry namedUser=new AclEntry.Builder().setType(AclEntryType.USER).setName("user1").build(); AclEntry group=new AclEntry.Builder().setType(AclEntryType.GROUP).build(); AclEntry namedGroup=new AclEntry.Builder().setType(AclEntryType.GROUP).setName("group1").build(); AclEntry mask=new AclEntry.Builder().setType(AclEntryType.MASK).build(); AclEntry other=new AclEntry.Builder().setType(AclEntryType.OTHER).build(); AclEntry defaultUser=new AclEntry.Builder().setScope(AclEntryScope.DEFAULT).setType(AclEntryType.USER).setName("user1").build(); AclEntry defaultMask=new AclEntry.Builder().setScope(AclEntryScope.DEFAULT).setType(AclEntryType.MASK).build(); List expectedList=new ArrayList(); expectedList.add(owner); expectedList.add(namedUser); expectedList.add(group); expectedList.add(namedGroup); expectedList.add(mask); expectedList.add(other); expectedList.add(defaultUser); expectedList.add(defaultMask); assertEquals("Parsed Acl not correct",expectedList,parsedList); }

InternalCallVerifier EqualityVerifier 
@Test public void testLsAclsUnsupported() throws Exception { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,"stubfs:///"); conf.setClass("fs.stubfs.impl",StubFileSystem.class,FileSystem.class); assertEquals("ls must succeed even if FileSystem does not implement ACLs.",0,ToolRunner.run(conf,new FsShell(),new String[]{"-ls","/"})); }

Class: org.apache.hadoop.fs.shell.TestCommandFactory

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testGetInstances(){ factory.registerCommands(TestRegistrar.class); Command instance; instance=factory.getInstance("blarg"); assertNull(instance); instance=factory.getInstance("tc1"); assertNotNull(instance); assertEquals(TestCommand1.class,instance.getClass()); assertEquals("tc1",instance.getCommandName()); instance=factory.getInstance("tc2"); assertNotNull(instance); assertEquals(TestCommand2.class,instance.getClass()); assertEquals("tc2",instance.getCommandName()); instance=factory.getInstance("tc2.1"); assertNotNull(instance); assertEquals(TestCommand2.class,instance.getClass()); assertEquals("tc2.1",instance.getCommandName()); factory.addClass(TestCommand4.class,"tc4"); instance=factory.getInstance("tc4"); assertNotNull(instance); assertEquals(TestCommand4.class,instance.getClass()); assertEquals("tc4",instance.getCommandName()); String usage=instance.getUsage(); assertEquals("-tc4 tc4_usage",usage); assertEquals("tc4_description",instance.getDescription()); }

InternalCallVerifier EqualityVerifier 
@Test public void testRegistration(){ assertArrayEquals(new String[]{},factory.getNames()); factory.registerCommands(TestRegistrar.class); String[] names=factory.getNames(); assertArrayEquals(new String[]{"tc1","tc2","tc2.1"},names); factory.addClass(TestCommand3.class,"tc3"); names=factory.getNames(); assertArrayEquals(new String[]{"tc1","tc2","tc2.1","tc3"},names); factory.addClass(TestCommand4.class,(new TestCommand4()).getName()); names=factory.getNames(); assertArrayEquals(new String[]{"tc1","tc2","tc2.1","tc3","tc4"},names); }

Class: org.apache.hadoop.fs.shell.TestCopyPreserveFlag

InternalCallVerifier EqualityVerifier 
@Test(timeout=10000) public void testDirectoryCpWithP() throws Exception { run(new Cp(),"-p","d1","d3"); assertEquals(fs.getFileStatus(new Path("d1")).getModificationTime(),fs.getFileStatus(new Path("d3")).getModificationTime()); assertEquals(fs.getFileStatus(new Path("d1")).getPermission(),fs.getFileStatus(new Path("d3")).getPermission()); }

Class: org.apache.hadoop.fs.shell.TestCount

InternalCallVerifier EqualityVerifier 
@Test public void getCommandName(){ Count count=new Count(); String actual=count.getCommandName(); String expected="count"; assertEquals("Count.getCommandName",expected,actual); }

InternalCallVerifier EqualityVerifier 
@Test public void isDeprecated(){ Count count=new Count(); boolean actual=count.isDeprecated(); boolean expected=false; assertEquals("Count.isDeprecated",expected,actual); }

InternalCallVerifier EqualityVerifier 
@Test public void getName(){ Count count=new Count(); String actual=count.getName(); String expected="count"; assertEquals("Count.getName",expected,actual); }

InternalCallVerifier EqualityVerifier 
@Test public void getUsage(){ Count count=new Count(); String actual=count.getUsage(); String expected="-count [-q] [-h] ..."; assertEquals("Count.getUsage",expected,actual); }

InternalCallVerifier EqualityVerifier 
@Test public void getReplacementCommand(){ Count count=new Count(); String actual=count.getReplacementCommand(); String expected=null; assertEquals("Count.getReplacementCommand",expected,actual); }

Class: org.apache.hadoop.fs.shell.TestHdfsTextCommand

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Tests whether binary Avro data files are displayed correctly. */ @Test public void testDisplayForAvroFiles() throws Exception { createAvroFile(generateWeatherAvroBinaryData()); Configuration conf=fs.getConf(); PathData pathData=new PathData(AVRO_FILENAME.toString(),conf); Display.Text text=new Display.Text(); text.setConf(conf); Method method=text.getClass().getDeclaredMethod("getInputStream",PathData.class); method.setAccessible(true); InputStream stream=(InputStream)method.invoke(text,pathData); String output=inputStreamToString(stream); String expectedOutput="{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + System.getProperty("line.separator") + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}"+ System.getProperty("line.separator")+ "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}"+ System.getProperty("line.separator")+ "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}"+ System.getProperty("line.separator")+ "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}"+ System.getProperty("line.separator"); assertEquals(expectedOutput,output); }

Class: org.apache.hadoop.fs.shell.TestPathData

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testCwdContents() throws Exception { String dirString=Path.CUR_DIR; PathData item=new PathData(dirString,conf); PathData[] items=item.getDirectoryContents(); assertEquals(sortedString("d1","d2"),sortedString(items)); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=30000) public void testRelativeGlob() throws Exception { PathData[] items=PathData.expandAsGlob("d1/f1*",conf); assertEquals(sortedString("d1/f1","d1/f1.1"),sortedString(items)); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=30000) public void testRelativeGlobBack() throws Exception { fs.setWorkingDirectory(new Path("d1")); PathData[] items=PathData.expandAsGlob("../d2/*",conf); assertEquals(sortedString("../d2/f3"),sortedString(items)); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testUnqualifiedUriContents() throws Exception { String dirString="d1"; PathData item=new PathData(dirString,conf); PathData[] items=item.getDirectoryContents(); assertEquals(sortedString("d1/f1","d1/f1.1","d1/f2"),sortedString(items)); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testQualifiedUriContents() throws Exception { String dirString=fs.makeQualified(new Path("d1")).toString(); PathData item=new PathData(dirString,conf); PathData[] items=item.getDirectoryContents(); assertEquals(sortedString(dirString + "/f1",dirString + "/f1.1",dirString + "/f2"),sortedString(items)); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=30000) public void testAbsoluteGlob() throws Exception { PathData[] items=PathData.expandAsGlob(testDir + "/d1/f1*",conf); assertEquals(sortedString(testDir + "/d1/f1",testDir + "/d1/f1.1"),sortedString(items)); String absolutePathNoDriveLetter=testDir + "/d1/f1"; if (Shell.WINDOWS) { absolutePathNoDriveLetter=absolutePathNoDriveLetter.substring(2); } items=PathData.expandAsGlob(absolutePathNoDriveLetter,conf); assertEquals(sortedString(absolutePathNoDriveLetter),sortedString(items)); items=PathData.expandAsGlob(".",conf); assertEquals(sortedString("."),sortedString(items)); }

EqualityVerifier 
@Test(timeout=5000) public void testToFileRawWindowsPaths() throws Exception { if (!Path.WINDOWS) { return; } String[] winPaths={"n:\\","N:\\","N:\\foo","N:\\foo\\bar","N:/","N:/foo","N:/foo/bar"}; PathData item; for ( String path : winPaths) { item=new PathData(path,conf); assertEquals(new File(path),item.toFile()); } item=new PathData("foo\\bar",conf); assertEquals(new File(testDir + "\\foo\\bar"),item.toFile()); }

EqualityVerifier 
@Test(timeout=30000) public void testWithStringAndConfForBuggyPath() throws Exception { String dirString="file:///tmp"; Path tmpDir=new Path(dirString); PathData item=new PathData(dirString,conf); assertEquals("file:/tmp",tmpDir.toString()); checkPathData(dirString,item); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=30000) public void testToFile() throws Exception { PathData item=new PathData(".",conf); assertEquals(new File(testDir.toString()),item.toFile()); item=new PathData("d1/f1",conf); assertEquals(new File(testDir + "/d1/f1"),item.toFile()); item=new PathData(testDir + "/d1/f1",conf); assertEquals(new File(testDir + "/d1/f1"),item.toFile()); }

Class: org.apache.hadoop.fs.shell.TestPathExceptions

InternalCallVerifier EqualityVerifier 
@Test public void testWithThrowable() throws Exception { IOException ioe=new IOException("KABOOM"); PathIOException pe=new PathIOException(path,ioe); assertEquals(new Path(path),pe.getPath()); assertEquals("`" + path + "': Input/output error: "+ error,pe.getMessage()); }

InternalCallVerifier EqualityVerifier 
@Test public void testWithDefaultString() throws Exception { PathIOException pe=new PathIOException(path); assertEquals(new Path(path),pe.getPath()); assertEquals("`" + path + "': Input/output error",pe.getMessage()); }

InternalCallVerifier EqualityVerifier 
@Test public void testWithCustomString() throws Exception { PathIOException pe=new PathIOException(path,error); assertEquals(new Path(path),pe.getPath()); assertEquals("`" + path + "': "+ error,pe.getMessage()); }

Class: org.apache.hadoop.fs.shell.TestTextCommand

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Tests whether binary Avro data files are displayed correctly. */ @Test(timeout=30000) public void testDisplayForAvroFiles() throws Exception { createAvroFile(generateWeatherAvroBinaryData()); Configuration conf=new Configuration(); URI localPath=new URI(AVRO_FILENAME); PathData pathData=new PathData(localPath,conf); Display.Text text=new Display.Text(); text.setConf(conf); Method method=text.getClass().getDeclaredMethod("getInputStream",PathData.class); method.setAccessible(true); InputStream stream=(InputStream)method.invoke(text,pathData); String output=inputStreamToString(stream); String expectedOutput="{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + System.getProperty("line.separator") + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}"+ System.getProperty("line.separator")+ "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}"+ System.getProperty("line.separator")+ "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}"+ System.getProperty("line.separator")+ "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}"+ System.getProperty("line.separator"); assertEquals(expectedOutput,output); }

Class: org.apache.hadoop.fs.slive.TestSlive

EqualityVerifier 
@Test public void testSelection() throws Exception { ConfigExtractor extractor=getTestConfig(false); WeightSelector selector=new WeightSelector(extractor,rnd); int expected=OperationType.values().length; Operation op=null; Set types=new HashSet(); FileSystem fs=FileSystem.get(extractor.getConfig()); while (true) { op=selector.select(1,1); if (op == null) { break; } op.run(fs); types.add(op.getType()); } assertEquals(types.size(),expected); }

InternalCallVerifier EqualityVerifier 
@Test public void testArguments() throws Exception { ConfigExtractor extractor=getTestConfig(true); assertEquals(extractor.getOpCount().intValue(),Constants.OperationType.values().length); assertEquals(extractor.getMapAmount().intValue(),2); assertEquals(extractor.getReducerAmount().intValue(),2); Range apRange=extractor.getAppendSize(); assertEquals(apRange.getLower().intValue(),Constants.MEGABYTES * 1); assertEquals(apRange.getUpper().intValue(),Constants.MEGABYTES * 2); Range wRange=extractor.getWriteSize(); assertEquals(wRange.getLower().intValue(),Constants.MEGABYTES * 1); assertEquals(wRange.getUpper().intValue(),Constants.MEGABYTES * 2); Range bRange=extractor.getBlockSize(); assertEquals(bRange.getLower().intValue(),Constants.MEGABYTES * 1); assertEquals(bRange.getUpper().intValue(),Constants.MEGABYTES * 2); String resfile=extractor.getResultFile(); assertEquals(resfile,getResultFile().toString()); int durationMs=extractor.getDurationMilliseconds(); assertEquals(durationMs,10 * 1000); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDataWriting() throws Exception { long byteAm=100; File fn=getTestFile(); DataWriter writer=new DataWriter(rnd); FileOutputStream fs=new FileOutputStream(fn); GenerateOutput ostat=writer.writeSegment(byteAm,fs); LOG.info(ostat); fs.close(); assertTrue(ostat.getBytesWritten() == byteAm); DataVerifier vf=new DataVerifier(); FileInputStream fin=new FileInputStream(fn); VerifyOutput vfout=vf.verifyFile(byteAm,new DataInputStream(fin)); LOG.info(vfout); fin.close(); assertEquals(vfout.getBytesRead(),byteAm); assertTrue(vfout.getChunksDifferent() == 0); }

InternalCallVerifier EqualityVerifier 
@Test public void testRange(){ Range r=new Range(10L,20L); assertEquals(r.getLower().longValue(),10L); assertEquals(r.getUpper().longValue(),20L); }

Class: org.apache.hadoop.fs.swift.TestFSMainOperationsSwift

EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) @Override public void testWDAbsolute() throws IOException { Path absoluteDir=getTestRootPath(fSys,"test/existingDir"); fSys.mkdirs(absoluteDir); fSys.setWorkingDirectory(absoluteDir); Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory()); }

Class: org.apache.hadoop.fs.swift.TestReadPastBuffer

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Seek past the buffer then read * @throws Throwable problems */ @Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable { instream=fs.open(readFile); assertEquals(0,instream.getPos()); instream.seek(SEEK_FILE_LEN - 2); assertTrue("Premature EOF",instream.read() != -1); assertTrue("Premature EOF",instream.read() != -1); assertMinusOne("read past end of file",instream.read()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Seek past the buffer and attempt a read(buffer) * @throws Throwable failures */ @Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBulkReadPastEndOfFile() throws Throwable { instream=fs.open(readFile); assertEquals(0,instream.getPos()); instream.seek(SEEK_FILE_LEN - 1); byte[] buffer=new byte[1]; int result=instream.read(buffer,0,1); result=instream.read(buffer,0,1); assertMinusOne("read past end of file",result); result=instream.read(buffer,0,1); assertMinusOne("read past end of file",result); result=instream.read(buffer,0,0); assertEquals("EOF checks coming before read range check",0,result); }

Class: org.apache.hadoop.fs.swift.TestSeek

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPositionedBulkReadDoesntChangePosition() throws Throwable { Path testSeekFile=new Path(testPath,"bigseekfile.txt"); byte[] block=SwiftTestUtils.dataset(65536,0,255); createFile(testSeekFile,block); instream=fs.open(testSeekFile); instream.seek(39999); assertTrue(-1 != instream.read()); assertEquals(40000,instream.getPos()); byte[] readBuffer=new byte[256]; instream.read(128,readBuffer,0,readBuffer.length); assertEquals(40000,instream.getPos()); assertEquals("@40000",block[40000],(byte)instream.read()); for (int i=0; i < 256; i++) { assertEquals("@" + i,block[i + 128],readBuffer[i]); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekAndReadPastEndOfFile() throws Throwable { instream=fs.open(smallSeekFile); assertEquals(0,instream.getPos()); instream.seek(SMALL_SEEK_FILE_LEN - 2); assertTrue("Premature EOF",instream.read() != -1); assertTrue("Premature EOF",instream.read() != -1); assertMinusOne("read past end of file",instream.read()); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testNegativeSeek() throws Throwable { instream=fs.open(smallSeekFile); assertEquals(0,instream.getPos()); try { instream.seek(-1); long p=instream.getPos(); LOG.warn("Seek to -1 returned a position of " + p); int result=instream.read(); fail("expected an exception, got data " + result + " at a position of "+ p); } catch ( IOException e) { } assertEquals(0,instream.getPos()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekBigFile() throws Throwable { Path testSeekFile=new Path(testPath,"bigseekfile.txt"); byte[] block=SwiftTestUtils.dataset(65536,0,255); createFile(testSeekFile,block); instream=fs.open(testSeekFile); assertEquals(0,instream.getPos()); instream.seek(0); int result=instream.read(); assertEquals(0,result); assertEquals(1,instream.read()); assertEquals(2,instream.read()); instream.seek(32768); assertEquals("@32768",block[32768],(byte)instream.read()); instream.seek(40000); assertEquals("@40000",block[40000],(byte)instream.read()); instream.seek(8191); assertEquals("@8191",block[8191],(byte)instream.read()); instream.seek(0); assertEquals("@0",0,(byte)instream.read()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekFile() throws Throwable { instream=fs.open(smallSeekFile); assertEquals(0,instream.getPos()); instream.seek(0); int result=instream.read(); assertEquals(0,result); assertEquals(1,instream.read()); assertEquals(2,instream.getPos()); assertEquals(2,instream.read()); assertEquals(3,instream.getPos()); instream.seek(128); assertEquals(128,instream.getPos()); assertEquals(128,instream.read()); instream.seek(63); assertEquals(63,instream.read()); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testSeekZeroByteFile() throws Throwable { instream=fs.open(zeroByteFile); assertEquals(0,instream.getPos()); int result=instream.read(); assertMinusOne("initial byte read",result); byte[] buffer=new byte[1]; instream.seek(0); result=instream.read(); assertMinusOne("post-seek byte read",result); result=instream.read(buffer,0,1); assertMinusOne("post-seek buffer read",result); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testBlockReadZeroByteFile() throws Throwable { instream=fs.open(zeroByteFile); assertEquals(0,instream.getPos()); byte[] buffer=new byte[1]; int result=instream.read(buffer,0,1); assertMinusOne("block read zero byte file",result); }

Class: org.apache.hadoop.fs.swift.TestSwiftConfig

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testProxyData() throws Exception { final Configuration configuration=createCoreConfig(); String proxy="web-proxy"; int port=8088; configuration.set(SWIFT_PROXY_HOST_PROPERTY,proxy); configuration.set(SWIFT_PROXY_PORT_PROPERTY,Integer.toString(port)); SwiftRestClient restClient=mkInstance(configuration); assertEquals(proxy,restClient.getProxyHost()); assertEquals(port,restClient.getProxyPort()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testPositivePartsize() throws Exception { final Configuration configuration=createCoreConfig(); int size=127; configuration.set(SWIFT_PARTITION_SIZE,Integer.toString(size)); SwiftRestClient restClient=mkInstance(configuration); assertEquals(size,restClient.getPartSizeKB()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testPositiveBlocksize() throws Exception { final Configuration configuration=createCoreConfig(); int size=127; configuration.set(SWIFT_BLOCKSIZE,Integer.toString(size)); SwiftRestClient restClient=mkInstance(configuration); assertEquals(size,restClient.getBlocksizeKB()); }

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemBasicOps

APIUtilityVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testOverwrite() throws Throwable { Path path=new Path("/test/Overwrite"); try { String text="Testing a put to a file " + System.currentTimeMillis(); writeTextFile(fs,path,text,false); assertFileHasLength(fs,path,text.length()); String text2="Overwriting a file " + System.currentTimeMillis(); writeTextFile(fs,path,text2,true); assertFileHasLength(fs,path,text2.length()); String result=readBytesToString(fs,path,text2.length()); assertEquals(text2,result); } finally { delete(fs,path); } }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testPutGetFile() throws Throwable { Path path=new Path("/test/PutGetFile"); try { String text="Testing a put and get to a file " + System.currentTimeMillis(); writeTextFile(fs,path,text,false); String result=readBytesToString(fs,path,text.length()); assertEquals(text,result); } finally { delete(fs,path); } }

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemDirectories

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * test that a dir off root has a listStatus() call that * works as expected. and that when a child is added. it changes * @throws Exception on failures */ @Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesOffRootHaveMatchingFileStatus() throws Exception { Path test=path("/test"); fs.delete(test,true); mkdirs(test); assertExists("created test directory",test); FileStatus[] statuses=fs.listStatus(test); String statusString=statusToString(test.toString(),statuses); assertEquals("Wrong number of elements in file status " + statusString,0,statuses.length); Path src=path("/test/file"); SwiftTestUtils.touch(fs,src); statuses=fs.listStatus(test); statusString=statusToString(test.toString(),statuses); assertEquals("Wrong number of elements in file status " + statusString,1,statuses.length); SwiftFileStatus stat=(SwiftFileStatus)statuses[0]; assertTrue("isDir(): Not a directory: " + stat,stat.isDir()); extraStatusAssertions(stat); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * test that a dir two levels down has a listStatus() call that * works as expected. * @throws Exception on failures */ @Test(timeout=SWIFT_TEST_TIMEOUT) public void testDirectoriesLowerDownHaveMatchingFileStatus() throws Exception { Path test=path("/test/testDirectoriesLowerDownHaveMatchingFileStatus"); fs.delete(test,true); mkdirs(test); assertExists("created test sub directory",test); FileStatus[] statuses=fs.listStatus(test); String statusString=statusToString(test.toString(),statuses); assertEquals("Wrong number of elements in file status " + statusString,0,statuses.length); }

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemExtendedContract

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testWriteReadFile() throws Exception { final Path f=new Path("/test/test"); final FSDataOutputStream fsDataOutputStream=fs.create(f); final String message="Test string"; fsDataOutputStream.write(message.getBytes()); fsDataOutputStream.close(); assertExists("created file",f); FSDataInputStream open=null; try { open=fs.open(f); final byte[] bytes=new byte[512]; final int read=open.read(bytes); final byte[] buffer=new byte[read]; System.arraycopy(bytes,0,buffer,0,read); assertEquals(message,new String(buffer)); } finally { fs.delete(f,false); IOUtils.closeStream(open); } }

EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testGetSchemeImplemented() throws Throwable { String scheme=fs.getScheme(); assertEquals(SwiftNativeFileSystem.SWIFT,scheme); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Assert that a filesystem is case sensitive. * This is done by creating a mixed-case filename and asserting that * its lower case version is not there. * @throws Exception failures */ @Test(timeout=SWIFT_TEST_TIMEOUT) public void testFilesystemIsCaseSensitive() throws Exception { String mixedCaseFilename="/test/UPPER.TXT"; Path upper=path(mixedCaseFilename); Path lower=path(mixedCaseFilename.toLowerCase(Locale.ENGLISH)); assertFalse("File exists" + upper,fs.exists(upper)); assertFalse("File exists" + lower,fs.exists(lower)); FSDataOutputStream out=fs.create(upper); out.writeUTF("UPPER"); out.close(); FileStatus upperStatus=fs.getFileStatus(upper); assertExists("Original upper case file" + upper,upper); assertPathDoesNotExist("lower case file",lower); out=fs.create(lower); out.writeUTF("l"); out.close(); assertExists("lower case file",lower); assertExists("Original upper case file " + upper,upper); FileStatus newStatus=fs.getFileStatus(upper); assertEquals("Expected status:" + upperStatus + " actual status "+ newStatus,upperStatus.getLen(),newStatus.getLen()); }

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemLsOperations

EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListStatusEmptyDirectory() throws Exception { createTestSubdirs(); FileStatus[] paths; paths=fs.listStatus(path("/test/hadoop/a")); assertEquals(dumpStats("/test/hadoop/a",paths),0,paths.length); }

EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListEmptyRoot() throws Throwable { describe("Empty the root dir and verify that an LS / returns {}"); cleanup("testListEmptyRoot",fs,"/test"); cleanup("testListEmptyRoot",fs,"/user"); FileStatus[] fileStatuses=fs.listStatus(path("/")); assertEquals("Non-empty root" + dumpStats("/",fileStatuses),0,fileStatuses.length); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListLevelTestHadoop() throws Exception { createTestSubdirs(); FileStatus[] paths; paths=fs.listStatus(path("/test/hadoop")); String stats=dumpStats("/test/hadoop",paths); assertEquals("Paths.length wrong in " + stats,3,paths.length); assertEquals("Path element[0] wrong: " + stats,path("/test/hadoop/a"),paths[0].getPath()); assertEquals("Path element[1] wrong: " + stats,path("/test/hadoop/b"),paths[1].getPath()); assertEquals("Path element[2] wrong: " + stats,path("/test/hadoop/c"),paths[2].getPath()); }

APIUtilityVerifier EqualityVerifier 
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testListNonEmptyRoot() throws Throwable { Path test=path("/test"); touch(fs,test); FileStatus[] fileStatuses=fs.listStatus(path("/")); String stats=dumpStats("/",fileStatuses); assertEquals("Wrong #of root children" + stats,1,fileStatuses.length); FileStatus status=fileStatuses[0]; assertEquals("Wrong path value" + stats,test,status.getPath()); }

APIUtilityVerifier EqualityVerifier 
/**
 * listStatus() of a plain file must return a single entry carrying the
 * file's own length.
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListStatusFile() throws Exception {
  describe("Create a single file under /test;"
      + " assert that listStatus(/test) finds it");
  final Path file = path("/test/filename");
  createFile(file);
  final FileStatus[] listing = fs.listStatus(file);
  assertEquals(dumpStats("/test/", listing), 1, listing.length);
  final FileStatus entry = listing[0];
  assertEquals("Wrong file len in listing of " + entry,
      data.length, entry.getLen());
}

EqualityVerifier 
/**
 * Listing /test must show exactly one child: /test/hadoop.
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testListLevelTest() throws Exception {
  createTestSubdirs();
  final FileStatus[] listing = fs.listStatus(path("/test"));
  assertEquals(dumpStats("/test", listing), 1, listing.length);
  assertEquals(path("/test/hadoop"), listing[0].getPath());
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemPartitionedUploads

EqualityVerifier 
/**
 * The 1 KB partition size set by the suite must be visible in the
 * filesystem configuration.
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testPartitionPropertyPropagatesToConf() throws Throwable {
  final int partitionSize =
      getConf().getInt(SwiftProtocolConstants.SWIFT_PARTITION_SIZE, 0);
  assertEquals(1, partitionSize);
}

APIUtilityVerifier EqualityVerifier 
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testOverwritePartitionedFile() throws Throwable {
  final Path path=new Path("/test/testOverwritePartitionedFile");
  // First upload: 8 KB, large enough to be split into partitions.
  final int len1=8192;
  final byte[] src1=SwiftTestUtils.dataset(len1,'A','Z');
  FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,1024);
  out.write(src1,0,len1);
  out.close();
  long expected=getExpectedPartitionsWritten(len1,PART_SIZE_BYTES,false);
  assertPartitionsWritten("initial upload",out,expected);
  assertExists("Exists",path);
  FileStatus status=fs.getFileStatus(path);
  assertEquals("Length",len1,status.getLen());
  // Overwrite with a smaller dataset (overwrite=true).
  final int len2=4095;
  final byte[] src2=SwiftTestUtils.dataset(len2,'a','z');
  out=fs.create(path,true,getBufferSize(),(short)1,1024);
  out.write(src2,0,len2);
  out.close();
  // Length and content must reflect only the second upload --
  // no stale partitions from the first one.
  status=fs.getFileStatus(path);
  assertEquals("Length",len2,status.getLen());
  byte[] dest=readDataset(fs,path,len2);
  SwiftTestUtils.compareByteArrays(src2,dest,len2);
}

EqualityVerifier 
/**
 * The configured partition size must reach the native store layer.
 * NOTE(review): method name misspells "Partition"; kept unchanged since
 * tests may be invoked/filtered by name.
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testPartionPropertyPropagatesToStore() throws Throwable {
  final long storePartSizeKB = fs.getStore().getPartsizeKB();
  assertEquals(1, storePartSizeKB);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout=SWIFT_BULK_IO_TEST_TIMEOUT) public void testManyPartitionedFile() throws Throwable {
  final Path path=new Path("/test/testManyPartitionedFile");
  // 15 partitions' worth of data; byte values span 32..144.
  int len=PART_SIZE_BYTES * 15;
  final byte[] src=SwiftTestUtils.dataset(len,32,144);
  FSDataOutputStream out=fs.create(path,false,getBufferSize(),(short)1,BLOCK_SIZE);
  out.write(src,0,src.length);
  int expected=getExpectedPartitionsWritten(len,PART_SIZE_BYTES,true);
  out.close();
  // Counters are queried from the (now closed) output stream.
  assertPartitionsWritten("write completed",out,expected);
  assertEquals("too few bytes written",len,SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded",len,SwiftNativeFileSystem.getBytesUploaded(out));
  // Round-trip: the file must read back byte-identical.
  byte[] dest=readDataset(fs,path,len);
  SwiftTestUtils.compareByteArrays(src,dest,len);
  // Listing the file path exposes one entry per partition written.
  FileStatus[] stats=fs.listStatus(path);
  assertEquals("wrong entry count in " + SwiftTestUtils.dumpStats(path.toString(),stats),expected,stats.length);
}

APIUtilityVerifier EqualityVerifier 
/**
 * Rename a partitioned file and verify the destination holds the full
 * dataset while the source path and its partition object are gone.
 *
 * Fixes a copy-paste bug in the original: the destination partition path
 * was built from {@code src} instead of {@code dest} and then never used;
 * the unused {@code destPart}/{@code destLs} locals have been removed.
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testRenamePartitionedFile() throws Throwable {
  Path src = new Path("/test/testRenamePartitionedFileSrc");
  int len = data.length;
  SwiftTestUtils.writeDataset(fs, src, data, len, 1024, false);
  assertExists("Exists", src);
  // Partition #1 must exist under the source before the rename.
  String partOneName = SwiftUtils.partitionFilenameFromNumber(1);
  Path srcPart = new Path(src, partOneName);
  Path dest = new Path("/test/testRenamePartitionedFileDest");
  assertExists("Partition Exists", srcPart);
  fs.rename(src, dest);
  assertPathExists(fs, "dest file missing", dest);
  // Destination must report the original length and content.
  FileStatus status = fs.getFileStatus(dest);
  assertEquals("Length of renamed file is wrong", len, status.getLen());
  byte[] destData = readDataset(fs, dest, len);
  SwiftTestUtils.compareByteArrays(data, destData, len);
  // Neither the source nor its partition object may survive the rename.
  String srcLs = SwiftTestUtils.ls(fs, src);
  assertPathDoesNotExist("deleted file still found in " + srcLs, src);
  assertPathDoesNotExist("partition file still found in " + srcLs, srcPart);
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemRead

APIUtilityVerifier EqualityVerifier 
/**
 * Read and write some JSON and verify it round-trips byte-for-byte;
 * then probe getFileBlockLocations() as a smoke test.
 *
 * Fix: the original assigned the block locations to an unused local
 * variable; the call is kept (it exercises the API) but the dead
 * assignment is dropped.
 * @throws IOException
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRWJson() throws IOException {
  final String message = "{" + " 'json': { 'i':43, 'b':true}," + " 's':'string'" + "}";
  final Path filePath = new Path("/test/file.json");
  writeTextFile(fs, filePath, message, false);
  String readJson = readBytesToString(fs, filePath, message.length());
  assertEquals(message, readJson);
  // Smoke-test the block-location lookup; the result is intentionally
  // unchecked -- only "does not throw" is asserted here.
  FileStatus status = fs.getFileStatus(filePath);
  fs.getFileBlockLocations(status, 0, 10);
}

APIUtilityVerifier EqualityVerifier 
/**
 * Write a small XML snippet and verify it reads back unchanged.
 * @throws IOException
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRWXML() throws IOException {
  final String message = "" + " " + " string" + "";
  final Path filePath = new Path("/test/file.xml");
  writeTextFile(fs, filePath, message, false);
  final String roundTripped = readBytesToString(fs, filePath, message.length());
  assertEquals(message, roundTripped);
}

Class: org.apache.hadoop.fs.swift.TestSwiftFileSystemRename

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rename a file into another directory and verify the bytes read back
 * from the destination match what was written to the source.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testRenameFile() throws Exception {
  assumeRenameSupported();
  final Path old=new Path("/test/alice/file");
  final Path newPath=new Path("/test/bob/file");
  fs.mkdirs(newPath.getParent());
  final FSDataOutputStream fsDataOutputStream=fs.create(old);
  final byte[] message="Some data".getBytes();
  fsDataOutputStream.write(message);
  fsDataOutputStream.close();
  assertTrue(fs.exists(old));
  // rename(src, dst, renameMustSucceed, srcExists, dstExists) -- helper
  // asserts the post-conditions of the rename.
  rename(old,newPath,true,false,true);
  // Read back through the new path and compare content.
  final FSDataInputStream bobStream=fs.open(newPath);
  final byte[] bytes=new byte[512];
  final int read=bobStream.read(bytes);
  bobStream.close();
  final byte[] buffer=new byte[read];
  System.arraycopy(bytes,0,buffer,0,read);
  assertEquals(new String(message),new String(buffer));
}

Class: org.apache.hadoop.fs.swift.TestSwiftObjectPath

APIUtilityVerifier EqualityVerifier 
/**
 * A full swift:// URL parsed via fromPath() must equal an object path
 * built directly from the extracted container name and the path part.
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testParseUrlPath() throws Exception {
  final String pathString = "swift://container.service1/home/user/files/file1";
  final URI uri = new URI(pathString);
  final SwiftObjectPath viaFromPath = SwiftObjectPath.fromPath(uri, new Path(pathString));
  final SwiftObjectPath viaConstructor = new SwiftObjectPath(
      RestClientBindings.extractContainerName(uri), "/home/user/files/file1");
  assertEquals(viaFromPath, viaConstructor);
}

APIUtilityVerifier EqualityVerifier 
/**
 * A plain (host-less) path parsed via fromPath() must equal an object
 * path built from the URI's container name and the same path string.
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testParsePath() throws Exception {
  final String pathString = "/home/user/files/file1";
  final URI uri = new URI("http://container.localhost");
  final SwiftObjectPath viaFromPath = SwiftObjectPath.fromPath(uri, new Path(pathString));
  final SwiftObjectPath viaConstructor = new SwiftObjectPath(
      RestClientBindings.extractContainerName(uri), pathString);
  assertEquals(viaFromPath, viaConstructor);
}

EqualityVerifier 
/**
 * Passing the complete swift:// URL as the object path must be
 * equivalent to passing just the path portion -- i.e. the constructor
 * is expected to strip the host part from the object path argument.
 */
@Test(timeout=SWIFT_TEST_TIMEOUT) public void testHandleUrlAsPath() throws Exception {
  final String hostPart="swift://container.service1";
  final String pathPart="/home/user/files/file1";
  final String uriString=hostPart + pathPart;
  final SwiftObjectPath expected=new SwiftObjectPath(uriString,pathPart);
  // Object path built from the full URL must normalize to the same value.
  final SwiftObjectPath actual=new SwiftObjectPath(uriString,uriString);
  assertEquals(expected,actual);
}

APIUtilityVerifier EqualityVerifier 
/**
 * A URL carrying a Keystone auth prefix (/v2/AUTH_...) must parse to the
 * same object path as one built from the bare document path, i.e. the
 * auth segment is stripped by fromPath().
 */
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testParseAuthenticatedUrl() throws Exception {
  final String pathString =
      "swift://container.service1/v2/AUTH_00345h34l93459y4/home/tom/documents/finance.docx";
  final URI uri = new URI(pathString);
  final SwiftObjectPath viaFromPath = SwiftObjectPath.fromPath(uri, new Path(pathString));
  final SwiftObjectPath viaConstructor = new SwiftObjectPath(
      RestClientBindings.extractContainerName(uri), "/home/tom/documents/finance.docx");
  assertEquals(viaFromPath, viaConstructor);
}

Class: org.apache.hadoop.fs.swift.scale.TestWriteManySmallFiles

APIUtilityVerifier IterativeVerifier EqualityVerifier 
/**
 * Scale test: write getOperationCount() small files, list the directory,
 * read every file back, then delete the tree. Per-phase durations are
 * accumulated and logged in a CSV-ish format for offline analysis.
 *
 * Fix: removed a redundant {@code (FileStatus[])} cast on the
 * {@code listStatus()} result (it already returns {@code FileStatus[]}).
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testScaledWriteThenRead() throws Throwable {
  Path dir = new Path("/test/manysmallfiles");
  Duration rm1 = new Duration();
  fs.delete(dir, true);
  rm1.finished();
  fs.mkdirs(dir);
  Duration ls1 = new Duration();
  fs.listStatus(dir);
  ls1.finished();
  long count = getOperationCount();
  SwiftTestUtils.noteAction("Beginning Write of " + count + " files ");
  DurationStats writeStats = new DurationStats("write");
  DurationStats readStats = new DurationStats("read");
  String format = "%08d";
  // Write phase: one small text file per iteration, timed individually.
  for (long l = 0; l < count; l++) {
    String name = String.format(format, l);
    Path p = new Path(dir, "part-" + name);
    Duration d = new Duration();
    SwiftTestUtils.writeTextFile(fs, p, name, false);
    d.finished();
    writeStats.add(d);
    // NOTE(review): deliberate 1s pause between writes -- presumably
    // throttling to avoid overloading the store; confirm before removing.
    Thread.sleep(1000);
  }
  SwiftTestUtils.noteAction("Beginning ls");
  Duration ls2 = new Duration();
  FileStatus[] status2 = fs.listStatus(dir);
  ls2.finished();
  assertEquals("Not enough entries in the directory", count, status2.length);
  // Read phase: each file must contain exactly its zero-padded name.
  SwiftTestUtils.noteAction("Beginning read");
  for (long l = 0; l < count; l++) {
    String name = String.format(format, l);
    Path p = new Path(dir, "part-" + name);
    Duration d = new Duration();
    String result = SwiftTestUtils.readBytesToString(fs, p, name.length());
    assertEquals(name, result);
    d.finished();
    readStats.add(d);
  }
  SwiftTestUtils.noteAction("Beginning delete");
  Duration rm2 = new Duration();
  fs.delete(dir, true);
  rm2.finished();
  // Dump timing data for offline analysis.
  LOG.info(String.format("'filesystem','%s'", fs.getUri()));
  LOG.info(writeStats.toString());
  LOG.info(readStats.toString());
  LOG.info(String.format("'rm1',%d,'ls1',%d", rm1.value(), ls1.value()));
  LOG.info(String.format("'rm2',%d,'ls2',%d", rm2.value(), ls2.value()));
}

Class: org.apache.hadoop.fs.viewfs.TestChRootedFileSystem

InternalCallVerifier EqualityVerifier 
/**
 * resolvePath() on the chrooted fs must map "/" to the chroot target and
 * "/foo" to a child of that target.
 */
@Test
public void testResolvePath() throws IOException {
  Assert.assertEquals(chrootedTo, fSys.resolvePath(new Path("/")));
  fileSystemTestHelper.createFile(fSys, "/foo");
  final Path resolved = fSys.resolvePath(new Path("/foo"));
  Assert.assertEquals(new Path(chrootedTo, "foo"), resolved);
}

InternalCallVerifier EqualityVerifier 
/**
 * The chrooted filesystem's URI must match the chroot target's URI.
 */
@Test
public void testURI() {
  Assert.assertEquals(chrootedTo.toUri(), fSys.getUri());
}

InternalCallVerifier EqualityVerifier 
/**
 * Basic path identities: URI matches the chroot target; working and home
 * directories both resolve to the qualified user.home; makeQualified
 * anchors absolute paths against the local-FS URI.
 */
@Test
public void testBasicPaths() {
  Assert.assertEquals(chrootedTo.toUri(), fSys.getUri());
  final Path qualifiedHome = fSys.makeQualified(new Path(System.getProperty("user.home")));
  Assert.assertEquals(qualifiedHome, fSys.getWorkingDirectory());
  Assert.assertEquals(qualifiedHome, fSys.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
      fSys.makeQualified(new Path("/foo/bar")));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise setWorkingDirectory/getWorkingDirectory on a chrooted fs:
 * absolute and relative targets, "." and ".." navigation, relative
 * file access via the working directory, and a local-FS absolute URI.
 * Order matters: each step depends on the working directory left by
 * the previous one.
 */
@Test public void testWorkingDirectory() throws Exception {
  fSys.mkdirs(new Path("/testWd"));
  Path workDir=new Path("/testWd");
  fSys.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir,fSys.getWorkingDirectory());
  // "." is a no-op; ".." climbs to the parent.
  fSys.setWorkingDirectory(new Path("."));
  Assert.assertEquals(workDir,fSys.getWorkingDirectory());
  fSys.setWorkingDirectory(new Path(".."));
  Assert.assertEquals(workDir.getParent(),fSys.getWorkingDirectory());
  // Reset, then cd into a relative directory.
  workDir=new Path("/testWd");
  fSys.setWorkingDirectory(workDir);
  Assert.assertEquals(workDir,fSys.getWorkingDirectory());
  Path relativeDir=new Path("existingDir1");
  Path absoluteDir=new Path(workDir,"existingDir1");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(relativeDir);
  Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
  // cd to an absolute directory.
  absoluteDir=new Path("/test/existingDir2");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
  // Relative file/dir operations must resolve against the working dir.
  Path absoluteFooPath=new Path(absoluteDir,"foo");
  fSys.create(absoluteFooPath).close();
  fSys.open(new Path("foo")).close();
  fSys.mkdirs(new Path("newDir"));
  Assert.assertTrue(fSys.isDirectory(new Path(absoluteDir,"newDir")));
  // An absolute file:// URI also works as a working directory.
  final String LOCAL_FS_ROOT_URI="file:///tmp/test";
  absoluteDir=new Path(LOCAL_FS_ROOT_URI + "/existingDir");
  fSys.mkdirs(absoluteDir);
  fSys.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir,fSys.getWorkingDirectory());
}

InternalCallVerifier EqualityVerifier 
/**
 * A freshly-created directory must report "no quota" (-1) for both the
 * namespace quota and the space quota.
 */
@Test
public void testGetContentSummary() throws IOException {
  fSys.mkdirs(new Path("/newDir/dirFoo"));
  final ContentSummary summary = fSys.getContentSummary(new Path("/newDir/dirFoo"));
  Assert.assertEquals(-1L, summary.getQuota());
  Assert.assertEquals(-1L, summary.getSpaceQuota());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Root listing through the chroot: starts empty, then after creating
 * two files and two top-level dirs (one with a nested child) the root
 * shows exactly four entries with the right file/dir kinds.
 * Note: the local "fs" variable is a FileStatus, not a filesystem.
 */
@Test public void testList() throws IOException {
  FileStatus fs=fSys.getFileStatus(new Path("/"));
  Assert.assertTrue(fs.isDirectory());
  // "/" resolves to the chroot target.
  Assert.assertEquals(fs.getPath(),chrootedTo);
  FileStatus[] dirPaths=fSys.listStatus(new Path("/"));
  Assert.assertEquals(0,dirPaths.length);
  fileSystemTestHelper.createFile(fSys,"/foo");
  fileSystemTestHelper.createFile(fSys,"/bar");
  fSys.mkdirs(new Path("/dirX"));
  fSys.mkdirs(fileSystemTestHelper.getTestRootPath(fSys,"/dirY"));
  fSys.mkdirs(new Path("/dirX/dirXX"));
  dirPaths=fSys.listStatus(new Path("/"));
  // Only the four direct children; /dirX/dirXX is not listed at root.
  Assert.assertEquals(4,dirPaths.length);
  fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"foo"),dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"bar"),dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirX"),dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
  fs=FileSystemTestHelper.containsPath(new Path(chrootedTo,"dirY"),dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
}

Class: org.apache.hadoop.fs.viewfs.TestChRootedFs

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * FileContext variant of the chrooted root-listing test: the root starts
 * empty, and after creating two files and two top-level dirs (one with a
 * nested child) exactly four entries appear with the right kinds.
 * Note: the local "fs" variable is a FileStatus, not a filesystem.
 */
@Test public void testList() throws IOException {
  FileStatus fs=fc.getFileStatus(new Path("/"));
  Assert.assertTrue(fs.isDirectory());
  // "/" resolves to the chroot target.
  Assert.assertEquals(fs.getPath(),chrootedTo);
  FileStatus[] dirPaths=fc.util().listStatus(new Path("/"));
  Assert.assertEquals(0,dirPaths.length);
  fileContextTestHelper.createFileNonRecursive(fc,"/foo");
  fileContextTestHelper.createFileNonRecursive(fc,"/bar");
  fc.mkdir(new Path("/dirX"),FileContext.DEFAULT_PERM,false);
  fc.mkdir(fileContextTestHelper.getTestRootPath(fc,"/dirY"),FileContext.DEFAULT_PERM,false);
  fc.mkdir(new Path("/dirX/dirXX"),FileContext.DEFAULT_PERM,false);
  dirPaths=fc.util().listStatus(new Path("/"));
  // Only the four direct children; /dirX/dirXX is not listed at root.
  Assert.assertEquals(4,dirPaths.length);
  fs=fileContextTestHelper.containsPath(fcTarget,"foo",dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs=fileContextTestHelper.containsPath(fcTarget,"bar",dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isFile());
  fs=fileContextTestHelper.containsPath(fcTarget,"dirX",dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
  fs=fileContextTestHelper.containsPath(fcTarget,"dirY",dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue(fs.isDirectory());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FileContext working-directory semantics on a chrooted fs: absolute and
 * relative cd, "."/".." navigation, relative file access, rejection of a
 * nonexistent target, and a local-FS absolute URI. FileContext returns
 * fully-qualified working dirs, hence the makeQualified() comparisons.
 * Order matters: each step depends on the previous working directory.
 */
@Test public void testWorkingDirectory() throws Exception {
  fc.mkdir(new Path("/testWd"),FileContext.DEFAULT_PERM,false);
  Path workDir=new Path("/testWd");
  Path fqWd=fc.makeQualified(workDir);
  fc.setWorkingDirectory(workDir);
  Assert.assertEquals(fqWd,fc.getWorkingDirectory());
  // "." is a no-op; ".." climbs to the parent.
  fc.setWorkingDirectory(new Path("."));
  Assert.assertEquals(fqWd,fc.getWorkingDirectory());
  fc.setWorkingDirectory(new Path(".."));
  Assert.assertEquals(fqWd.getParent(),fc.getWorkingDirectory());
  // Reset, then cd into a relative directory.
  workDir=new Path("/testWd");
  fqWd=fc.makeQualified(workDir);
  fc.setWorkingDirectory(workDir);
  Assert.assertEquals(fqWd,fc.getWorkingDirectory());
  Path relativeDir=new Path("existingDir1");
  Path absoluteDir=new Path(workDir,"existingDir1");
  fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
  Path fqAbsoluteDir=fc.makeQualified(absoluteDir);
  fc.setWorkingDirectory(relativeDir);
  Assert.assertEquals(fqAbsoluteDir,fc.getWorkingDirectory());
  // cd to an absolute directory.
  absoluteDir=new Path("/test/existingDir2");
  fqAbsoluteDir=fc.makeQualified(absoluteDir);
  fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
  fc.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(fqAbsoluteDir,fc.getWorkingDirectory());
  // Relative file/dir operations must resolve against the working dir.
  Path absolutePath=new Path(absoluteDir,"foo");
  fc.create(absolutePath,EnumSet.of(CreateFlag.CREATE)).close();
  fc.open(new Path("foo")).close();
  fc.mkdir(new Path("newDir"),FileContext.DEFAULT_PERM,true);
  Assert.assertTrue(isDir(fc,new Path(absoluteDir,"newDir")));
  // cd to a nonexistent directory must fail.
  absoluteDir=fileContextTestHelper.getTestRootPath(fc,"nonexistingPath");
  try {
    fc.setWorkingDirectory(absoluteDir);
    Assert.fail("cd to non existing dir should have failed");
  } catch ( Exception e) {
    // expected: setWorkingDirectory rejects missing paths
  }
  // An absolute file:// URI also works as a working directory.
  final String LOCAL_FS_ROOT_URI="file:///tmp/test";
  absoluteDir=new Path(LOCAL_FS_ROOT_URI + "/existingDir");
  fc.mkdir(absoluteDir,FileContext.DEFAULT_PERM,true);
  fc.setWorkingDirectory(absoluteDir);
  Assert.assertEquals(absoluteDir,fc.getWorkingDirectory());
}

InternalCallVerifier EqualityVerifier 
/**
 * Basic FileContext path identities on the chrooted fs: default fs URI
 * matches the chroot target; working and home directories resolve to the
 * qualified user.home; makeQualified anchors against the local-FS URI.
 */
@Test
public void testBasicPaths() {
  Assert.assertEquals(chrootedTo.toUri(), fc.getDefaultFileSystem().getUri());
  final Path qualifiedHome = fc.makeQualified(new Path(System.getProperty("user.home")));
  Assert.assertEquals(qualifiedHome, fc.getWorkingDirectory());
  Assert.assertEquals(qualifiedHome, fc.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null),
      fc.makeQualified(new Path("/foo/bar")));
}

InternalCallVerifier EqualityVerifier 
/**
 * resolvePath() via the default fs must map "/" to the chroot target and
 * "/foo" to a child of that target.
 */
@Test
public void testResolvePath() throws IOException {
  Assert.assertEquals(chrootedTo,
      fc.getDefaultFileSystem().resolvePath(new Path("/")));
  fileContextTestHelper.createFile(fc, "/foo");
  final Path resolved = fc.getDefaultFileSystem().resolvePath(new Path("/foo"));
  Assert.assertEquals(new Path(chrootedTo, "foo"), resolved);
}

Class: org.apache.hadoop.fs.viewfs.TestFSMainOperationsLocalFileSystem

EqualityVerifier 
/**
 * Setting the working directory to an absolute test path must be
 * reflected by getWorkingDirectory().
 */
@Test
@Override
public void testWDAbsolute() throws IOException {
  final Path target = getTestRootPath(fSys, "test/existingDir");
  fSys.mkdirs(target);
  fSys.setWorkingDirectory(target);
  Assert.assertEquals(target, fSys.getWorkingDirectory());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFileSystemDelegation

EqualityVerifier 
/**
 * Sanity check that the two mocked child filesystems expose the URIs
 * the fixture registered for them.
 */
@Test
public void testSanity() {
  final String uri1 = fs1.getUri().toString();
  final String uri2 = fs2.getUri().toString();
  assertEquals("fs1:/", uri1);
  assertEquals("fs2:/", uri2);
}

Class: org.apache.hadoop.fs.viewfs.TestViewFileSystemDelegationTokenSupport

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * viewFs must report exactly its two mounted child filesystems, while
 * the leaf filesystems themselves report none.
 *
 * Fix: the child list used a raw {@code List}; parameterized as
 * {@code List<FileSystem>} to match getChildFileSystems()'s element type.
 */
@Test
public void testGetChildFileSystems() throws Exception {
  assertNull(fs1.getChildFileSystems());
  assertNull(fs2.getChildFileSystems());
  List<FileSystem> children = Arrays.asList(viewFs.getChildFileSystems());
  assertEquals(2, children.size());
  assertTrue(children.contains(fs1));
  assertTrue(children.contains(fs2));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Delegation tokens from both child filesystems must be collected
 * exactly once through viewFs, and a second request must add nothing.
 *
 * Fix: replaced C-style array declarations ({@code Token x[]}) with the
 * conventional Java form ({@code Token[] x}).
 */
@Test
public void testAddDelegationTokens() throws Exception {
  Credentials creds = new Credentials();
  // Each child fs contributes exactly one token.
  Token[] fs1Tokens = addTokensWithCreds(fs1, creds);
  assertEquals(1, fs1Tokens.length);
  assertEquals(1, creds.numberOfTokens());
  Token[] fs2Tokens = addTokensWithCreds(fs2, creds);
  assertEquals(1, fs2Tokens.length);
  assertEquals(2, creds.numberOfTokens());
  // Snapshot the per-fs credentials, then fetch through the view.
  Credentials savedCreds = creds;
  creds = new Credentials();
  Token[] viewFsTokens = viewFs.addDelegationTokens("me", creds);
  assertEquals(2, viewFsTokens.length);
  assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
  assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
  // Re-requesting must be a no-op: the tokens are already present.
  viewFsTokens = viewFs.addDelegationTokens("me", creds);
  assertEquals(0, viewFsTokens.length);
  assertTrue(creds.getAllTokens().containsAll(savedCreds.getAllTokens()));
  assertEquals(savedCreds.numberOfTokens(), creds.numberOfTokens());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFileSystemWithAcls

InternalCallVerifier EqualityVerifier 
/**
 * Verify a ViewFs wrapped over multiple federated NameNodes will
 * dispatch the ACL operations to the correct NameNode.
 */
@Test public void testAclOnMountEntry() throws Exception {
  // Set a full ACL on mount 1 and check it via both the view and HDFS.
  List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE));
  fsView.setAcl(mountOnNn1,aclSpec);
  AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
  assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn1)));
  assertArrayEquals(expected,aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
  // Adding a DEFAULT entry expands the derived default set in the status.
  aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",READ));
  fsView.modifyAclEntries(mountOnNn1,aclSpec);
  expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(DEFAULT,USER,READ_WRITE),aclEntry(DEFAULT,USER,"foo",READ),aclEntry(DEFAULT,GROUP,READ),aclEntry(DEFAULT,MASK,READ),aclEntry(DEFAULT,OTHER,NONE)};
  assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn1)));
  // Removing the default ACL restores the access-only entries.
  fsView.removeDefaultAcl(mountOnNn1);
  expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
  assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn1)));
  assertArrayEquals(expected,aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
  // Mount 2 (on the other NameNode) must be untouched so far.
  assertEquals(0,fsView.getAclStatus(mountOnNn2).getEntries().size());
  assertEquals(0,fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
  // removeAcl on mount 1 clears it on both the view and HDFS.
  fsView.removeAcl(mountOnNn1);
  assertEquals(0,fsView.getAclStatus(mountOnNn1).getEntries().size());
  assertEquals(0,fHdfs.getAclStatus(targetTestRoot).getEntries().size());
  // Now operate on mount 2 and verify dispatch to the second NameNode.
  aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ));
  fsView.modifyAclEntries(mountOnNn2,aclSpec);
  expected=new AclEntry[]{aclEntry(ACCESS,USER,"bar",READ),aclEntry(ACCESS,GROUP,READ_EXECUTE)};
  assertArrayEquals(expected,aclEntryArray(fsView.getAclStatus(mountOnNn2)));
  assertArrayEquals(expected,aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
  fsView.removeAclEntries(mountOnNn2,Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ)));
  expected=new AclEntry[]{aclEntry(ACCESS,GROUP,READ_EXECUTE)};
  assertArrayEquals(expected,aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
  fsView.removeAcl(mountOnNn2);
  assertEquals(0,fsView.getAclStatus(mountOnNn2).getEntries().size());
  assertEquals(0,fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFileSystemWithAuthorityLocalFileSystem

EqualityVerifier 
/**
 * With an authority present, the view fs URI, working/home directories
 * and makeQualified() must all resolve against schemeWithAuthority.
 */
@Override
@Test
public void testBasicPaths() {
  Assert.assertEquals(schemeWithAuthority, fsView.getUri());
  final Path qualifiedUserHome =
      fsView.makeQualified(new Path("/user/" + System.getProperty("user.name")));
  Assert.assertEquals(qualifiedUserHome, fsView.getWorkingDirectory());
  Assert.assertEquals(qualifiedUserHome, fsView.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(schemeWithAuthority, null),
      fsView.makeQualified(new Path("/foo/bar")));
}

Class: org.apache.hadoop.fs.viewfs.TestViewFileSystemWithXAttrs

InternalCallVerifier EqualityVerifier 
/**
 * Verify a ViewFileSystem wrapped over multiple federated NameNodes will
 * dispatch the XAttr operations to the correct NameNode.
 */
@Test public void testXAttrOnMountEntry() throws Exception {
  // Set two xattrs on mount 1; visible via both view and HDFS 1.
  fsView.setXAttr(mountOnNn1,name1,value1);
  fsView.setXAttr(mountOnNn1,name2,value2);
  assertEquals(2,fsView.getXAttrs(mountOnNn1).size());
  assertArrayEquals(value1,fsView.getXAttr(mountOnNn1,name1));
  assertArrayEquals(value2,fsView.getXAttr(mountOnNn1,name2));
  assertArrayEquals(value1,fHdfs.getXAttr(targetTestRoot,name1));
  assertArrayEquals(value2,fHdfs.getXAttr(targetTestRoot,name2));
  // Mount 2 (other NameNode) must be untouched.
  assertEquals(0,fsView.getXAttrs(mountOnNn2).size());
  assertEquals(0,fHdfs2.getXAttrs(targetTestRoot2).size());
  // Removing both xattrs clears mount 1 on view and HDFS 1.
  fsView.removeXAttr(mountOnNn1,name1);
  fsView.removeXAttr(mountOnNn1,name2);
  assertEquals(0,fsView.getXAttrs(mountOnNn1).size());
  assertEquals(0,fHdfs.getXAttrs(targetTestRoot).size());
  // Repeat against mount 2 to verify dispatch to the second NameNode.
  fsView.setXAttr(mountOnNn2,name1,value1);
  fsView.setXAttr(mountOnNn2,name2,value2);
  assertEquals(2,fsView.getXAttrs(mountOnNn2).size());
  assertArrayEquals(value1,fsView.getXAttr(mountOnNn2,name1));
  assertArrayEquals(value2,fsView.getXAttr(mountOnNn2,name2));
  assertArrayEquals(value1,fHdfs2.getXAttr(targetTestRoot2,name1));
  assertArrayEquals(value2,fHdfs2.getXAttr(targetTestRoot2,name2));
  fsView.removeXAttr(mountOnNn2,name1);
  fsView.removeXAttr(mountOnNn2,name2);
  assertEquals(0,fsView.getXAttrs(mountOnNn2).size());
  assertEquals(0,fHdfs2.getXAttrs(targetTestRoot2).size());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsDefaultValue

InternalCallVerifier EqualityVerifier 
/**
 * Test that getContentSummary can be retrieved on the client side:
 * quotas set directly on the backing DistributedFileSystem must be
 * visible through the viewfs content summary.
 */
@Test
public void testGetContentSummary() throws IOException {
  final DistributedFileSystem dfs =
      (DistributedFileSystem) cluster.getFileSystem(0);
  dfs.setQuota(testFileDirPath, 100, 500);
  final ContentSummary summary = vfs.getContentSummary(testFileDirPath);
  assertEquals(100, summary.getQuota());
  assertEquals(500, summary.getSpaceQuota());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that default blocksize values can be retrieved on the client side:
 * the path-less overload must throw NotInMountpointException on viewFs,
 * while the path-based overload returns the HDFS default.
 *
 * Fixes: the failure message named the wrong method ("getServerDefaults")
 * and misspelled "exception"; assertEquals arguments were in
 * (actual, expected) order, which garbles failure output.
 */
@Test
public void testGetDefaultBlockSize() throws IOException, URISyntaxException {
  try {
    vfs.getDefaultBlockSize();
    fail("getDefaultBlockSize on viewFs did not throw exception!");
  } catch (NotInMountpointException e) {
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, vfs.getDefaultBlockSize(testFilePath));
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that server default values can be retrieved on the client side:
 * the path-less overload must throw NotInMountpointException on viewFs,
 * while the path-based overload returns the configured HDFS defaults.
 *
 * Fixes: misspelled "exception" in the failure message; dropped the
 * unused local that captured the (never-produced) return value.
 */
@Test
public void testServerDefaults() throws IOException {
  try {
    vfs.getServerDefaults();
    fail("getServerDefaults on viewFs did not throw exception!");
  } catch (NotInMountpointException e) {
    FsServerDefaults serverDefaults = vfs.getServerDefaults(testFilePath);
    assertEquals(DFS_BLOCK_SIZE_DEFAULT, serverDefaults.getBlockSize());
    assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT, serverDefaults.getBytesPerChecksum());
    assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT, serverDefaults.getWritePacketSize());
    assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT, serverDefaults.getFileBufferSize());
    // The test cluster is configured with one extra replica over default.
    assertEquals(DFS_REPLICATION_DEFAULT + 1, serverDefaults.getReplication());
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that default replication values can be retrieved on the client
 * side: the path-less overload must throw NotInMountpointException on
 * viewFs, while the path-based overload returns the cluster's value.
 *
 * Fixes: misspelled "exception" in the failure message; assertEquals
 * arguments were in (actual, expected) order.
 */
@Test
public void testGetDefaultReplication() throws IOException, URISyntaxException {
  try {
    vfs.getDefaultReplication();
    fail("getDefaultReplication on viewFs did not throw exception!");
  } catch (NotInMountpointException e) {
    // The test cluster is configured with one extra replica over default.
    assertEquals(DFS_REPLICATION_DEFAULT + 1, vfs.getDefaultReplication(testFilePath));
  }
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsFileStatusHdfs

InternalCallVerifier EqualityVerifier 
/**
 * A FileStatus fetched through viewfs must survive a Writable
 * write/readFields round trip with its length intact.
 * (Method name misspells "Serialization"; kept for compatibility.)
 */
@Test
public void testFileStatusSerialziation() throws IOException, URISyntaxException {
  final long expectedLen = fileSystemTestHelper.createFile(fHdfs, testfilename);
  final FileStatus original = vfs.getFileStatus(new Path(testfilename));
  assertEquals(expectedLen, original.getLen());
  final DataOutputBuffer outBuf = new DataOutputBuffer();
  original.write(outBuf);
  final DataInputBuffer inBuf = new DataInputBuffer();
  inBuf.reset(outBuf.getData(), 0, outBuf.getLength());
  final FileStatus roundTripped = new FileStatus();
  roundTripped.readFields(inBuf);
  assertEquals(expectedLen, roundTripped.getLen());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * The checksum seen through viewfs must equal the one from the backing
 * HDFS file, and must differ from a different-sized sibling's checksum.
 */
@Test public void testGetFileChecksum() throws IOException, URISyntaxException {
  // Create the target file plus a different-sized sibling.
  fileSystemTestHelper.createFile(fHdfs,someFile);
  fileSystemTestHelper.createFile(fHdfs,fileSystemTestHelper.getTestRootPath(fHdfs,someFile + "other"),1,512);
  // NOTE(review): assumes /vfstmp/someFileForTestGetFileChecksum is the
  // viewfs mount of someFile -- confirm against the suite's mount setup.
  FileChecksum viewFSCheckSum=vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
  FileChecksum hdfsCheckSum=fHdfs.getFileChecksum(new Path(someFile));
  FileChecksum otherHdfsFileCheckSum=fHdfs.getFileChecksum(new Path(someFile + "other"));
  assertEquals("HDFS and ViewFS checksums were not the same",viewFSCheckSum,hdfsCheckSum);
  assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!",viewFSCheckSum.equals(otherHdfsFileCheckSum));
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsWithAcls

InternalCallVerifier EqualityVerifier 
/**
 * Verify a ViewFs wrapped over multiple federated NameNodes will
 * dispatch the ACL operations to the correct NameNode.
 */
@Test public void testAclOnMountEntry() throws Exception {
  // Set a full ACL on mount 1 and check it via both the view and NN 1.
  List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE));
  fcView.setAcl(mountOnNn1,aclSpec);
  AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
  assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn1)));
  assertArrayEquals(expected,aclEntryArray(fc.getAclStatus(targetTestRoot)));
  // Adding a DEFAULT entry expands the derived default set in the status.
  aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",READ));
  fcView.modifyAclEntries(mountOnNn1,aclSpec);
  expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(DEFAULT,USER,READ_WRITE),aclEntry(DEFAULT,USER,"foo",READ),aclEntry(DEFAULT,GROUP,READ),aclEntry(DEFAULT,MASK,READ),aclEntry(DEFAULT,OTHER,NONE)};
  assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn1)));
  // Removing the default ACL restores the access-only entries.
  fcView.removeDefaultAcl(mountOnNn1);
  expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)};
  assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn1)));
  assertArrayEquals(expected,aclEntryArray(fc.getAclStatus(targetTestRoot)));
  // Mount 2 (on the other NameNode) must be untouched so far.
  assertEquals(0,fcView.getAclStatus(mountOnNn2).getEntries().size());
  assertEquals(0,fc2.getAclStatus(targetTestRoot2).getEntries().size());
  // removeAcl on mount 1 clears it on both the view and NN 1.
  fcView.removeAcl(mountOnNn1);
  assertEquals(0,fcView.getAclStatus(mountOnNn1).getEntries().size());
  assertEquals(0,fc.getAclStatus(targetTestRoot).getEntries().size());
  // Now operate on mount 2 and verify dispatch to the second NameNode.
  aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ));
  fcView.modifyAclEntries(mountOnNn2,aclSpec);
  expected=new AclEntry[]{aclEntry(ACCESS,USER,"bar",READ),aclEntry(ACCESS,GROUP,READ_EXECUTE)};
  assertArrayEquals(expected,aclEntryArray(fcView.getAclStatus(mountOnNn2)));
  assertArrayEquals(expected,aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
  fcView.removeAclEntries(mountOnNn2,Lists.newArrayList(aclEntry(ACCESS,USER,"bar",READ)));
  expected=new AclEntry[]{aclEntry(ACCESS,GROUP,READ_EXECUTE)};
  assertArrayEquals(expected,aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
  fcView.removeAcl(mountOnNn2);
  assertEquals(0,fcView.getAclStatus(mountOnNn2).getEntries().size());
  assertEquals(0,fc2.getAclStatus(targetTestRoot2).getEntries().size());
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsWithAuthorityLocalFs

EqualityVerifier 
/**
 * With an authority present, the default fs URI, working/home directories
 * and makeQualified() must all resolve against schemeWithAuthority.
 */
@Override
@Test
public void testBasicPaths() {
  Assert.assertEquals(schemeWithAuthority, fcView.getDefaultFileSystem().getUri());
  final Path qualifiedUserHome =
      fcView.makeQualified(new Path("/user/" + System.getProperty("user.name")));
  Assert.assertEquals(qualifiedUserHome, fcView.getWorkingDirectory());
  Assert.assertEquals(qualifiedUserHome, fcView.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(schemeWithAuthority, null),
      fcView.makeQualified(new Path("/foo/bar")));
}

Class: org.apache.hadoop.fs.viewfs.TestViewFsWithXAttrs

InternalCallVerifier EqualityVerifier 
/**
 * Verify a ViewFs wrapped over multiple federated NameNodes will
 * dispatch the XAttr operations to the correct NameNode.
 */
@Test public void testXAttrOnMountEntry() throws Exception {
  // Set two xattrs on mount 1; visible via both the view and NN 1.
  fcView.setXAttr(mountOnNn1,name1,value1);
  fcView.setXAttr(mountOnNn1,name2,value2);
  assertEquals(2,fcView.getXAttrs(mountOnNn1).size());
  assertArrayEquals(value1,fcView.getXAttr(mountOnNn1,name1));
  assertArrayEquals(value2,fcView.getXAttr(mountOnNn1,name2));
  assertArrayEquals(value1,fc.getXAttr(targetTestRoot,name1));
  assertArrayEquals(value2,fc.getXAttr(targetTestRoot,name2));
  // Mount 2 (other NameNode) must be untouched.
  assertEquals(0,fcView.getXAttrs(mountOnNn2).size());
  assertEquals(0,fc2.getXAttrs(targetTestRoot2).size());
  // Removing both xattrs clears mount 1 on the view and NN 1.
  fcView.removeXAttr(mountOnNn1,name1);
  fcView.removeXAttr(mountOnNn1,name2);
  assertEquals(0,fcView.getXAttrs(mountOnNn1).size());
  assertEquals(0,fc.getXAttrs(targetTestRoot).size());
  // Repeat against mount 2 to verify dispatch to the second NameNode.
  fcView.setXAttr(mountOnNn2,name1,value1);
  fcView.setXAttr(mountOnNn2,name2,value2);
  assertEquals(2,fcView.getXAttrs(mountOnNn2).size());
  assertArrayEquals(value1,fcView.getXAttr(mountOnNn2,name1));
  assertArrayEquals(value2,fcView.getXAttr(mountOnNn2,name2));
  assertArrayEquals(value1,fc2.getXAttr(targetTestRoot2,name1));
  assertArrayEquals(value2,fc2.getXAttr(targetTestRoot2,name2));
  fcView.removeXAttr(mountOnNn2,name1);
  fcView.removeXAttr(mountOnNn2,name2);
  assertEquals(0,fcView.getXAttrs(mountOnNn2).size());
  assertEquals(0,fc2.getXAttrs(targetTestRoot2).size());
}

Class: org.apache.hadoop.fs.viewfs.TestViewfsFileStatus

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes a small file through a viewfs link, fetches its FileStatus and
 * round-trips that status through Writable serialization, checking the file
 * length survives every step.  ("Serialziation" in the method name is a
 * historical typo; the public test name is kept unchanged.)
 */
@Test public void testFileStatusSerialziation() throws IOException, URISyntaxException {
  String testfilename = "testFileStatusSerialziation";
  TEST_DIR.mkdirs();
  File infile = new File(TEST_DIR, testfilename);
  final byte[] content = "dingos".getBytes();
  // try-with-resources replaces the original manual try/finally close.
  try (FileOutputStream fos = new FileOutputStream(infile)) {
    fos.write(content);
  }
  assertEquals((long) content.length, infile.length());

  // Mount TEST_DIR at /foo/bar/baz and read the status through the view.
  Configuration conf = new Configuration();
  ConfigUtil.addLink(conf, "/foo/bar/baz", TEST_DIR.toURI());
  FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  assertEquals(ViewFileSystem.class, vfs.getClass());
  FileStatus stat = vfs.getFileStatus(new Path("/foo/bar/baz", testfilename));
  assertEquals(content.length, stat.getLen());

  // Serialize, then deserialize the status and re-check the length.
  DataOutputBuffer dob = new DataOutputBuffer();
  stat.write(dob);
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), 0, dob.getLength());
  FileStatus deSer = new FileStatus();
  deSer.readFields(dib);
  assertEquals(content.length, deSer.getLen());
}

Class: org.apache.hadoop.fs.viewfs.ViewFileSystemBaseTest

InternalCallVerifier EqualityVerifier 
/**
 * Files and directories created through the view must resolve, via
 * resolvePath(), to the corresponding physical path under targetTestRoot.
 */
@Test public void testResolvePathThroughMountPoints() throws IOException {
  fileSystemTestHelper.createFile(fsView, "/user/foo");
  Assert.assertEquals(new Path(targetTestRoot, "user/foo"),
      fsView.resolvePath(new Path("/user/foo")));
  // Nested directories resolve the same way.
  for (String dir : new String[] {"/user/dirX", "/user/dirX/dirY"}) {
    fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, dir));
    Assert.assertEquals(new Path(targetTestRoot, dir.substring(1)),
        fsView.resolvePath(new Path(dir)));
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Creates a 10x1024-byte file in the target FS, checks it appears as a file
// through the view, and compares block locations fetched via the view with
// those from the target FS (expected count depends on SupportsBlocks).
// NOTE(review): the second fsView.getFileBlockLocations(...) result is
// discarded, so the second compareBLs() re-checks the original viewBL against
// freshly fetched target locations — confirm this is intentional.
@Test public void testGetBlockLocations() throws IOException { Path targetFilePath=new Path(targetTestRoot,"data/largeFile"); FileSystemTestHelper.createFile(fsTarget,targetFilePath,10,1024); Path viewFilePath=new Path("/data/largeFile"); Assert.assertTrue("Created File should be type File",fsView.isFile(viewFilePath)); BlockLocation[] viewBL=fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath),0,10240 + 100); Assert.assertEquals(SupportsBlocks ? 10 : 1,viewBL.length); BlockLocation[] targetBL=fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath),0,10240 + 100); compareBLs(viewBL,targetBL); fsView.getFileBlockLocations(fsView.getFileStatus(viewFilePath),0,10240 + 100); targetBL=fsTarget.getFileBlockLocations(fsTarget.getFileStatus(targetFilePath),0,10240 + 100); compareBLs(viewBL,targetBL); }

InternalCallVerifier EqualityVerifier 
/**
 * Each mount entry of the view must resolve to its target path under
 * targetTestRoot.
 */
@Test public void testResolvePathMountPoints() throws IOException {
  final String[][] links = {
      {"/user", "user"},
      {"/data", "data"},
      {"/internalDir/linkToDir2", "dir2"},
      {"/internalDir/internalDir2/linkToDir3", "dir3"},
  };
  for (String[] link : links) {
    Assert.assertEquals(new Path(targetTestRoot, link[1]),
        fsView.resolvePath(new Path(link[0])));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Paths that live in the mount table itself (internal dirs) resolve to
 * themselves rather than to a target file system path.
 */
@Test public void testResolvePathInternalPaths() throws IOException {
  for (String p : new String[] {"/", "/internalDir"}) {
    Assert.assertEquals(new Path(p), fsView.resolvePath(new Path(p)));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * The view must expose exactly the number of mount points configured by the
 * test fixture (getExpectedMountPoints()).
 */
@Test public void testGetMountPoints() {
  final ViewFileSystem viewfs = (ViewFileSystem) fsView;
  final MountPoint[] points = viewfs.getMountPoints();
  Assert.assertEquals(getExpectedMountPoints(), points.length);
}

InternalCallVerifier EqualityVerifier 
/**
 * Basic path sanity for the view FileSystem: URI, working directory, home
 * directory and makeQualified all agree with the viewfs scheme.
 */
@Test public void testBasicPaths() {
  Assert.assertEquals(FsConstants.VIEWFS_URI, fsView.getUri());
  final Path userDir = new Path("/user/" + System.getProperty("user.name"));
  Assert.assertEquals(fsView.makeQualified(userDir),
      fsView.getWorkingDirectory());
  Assert.assertEquals(fsView.makeQualified(userDir),
      fsView.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(FsConstants.VIEWFS_URI, null),
      fsView.makeQualified(new Path("/foo/bar")));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getAclStatus on an internal (mount-table) directory should report the
 * current user/group as owner, the minimal 555 ACL and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
  // Argument order fixed to JUnit's (expected, actual) convention so
  // failure messages read correctly.
  assertEquals(currentUser.getUserName(), aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0], aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555), aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * listStatus on a mount target dir: starts empty, then shows a created file
 * (with the right length), and finally both the file and a created
 * directory.
 */
@Test public void testListOnMountTargetDirs() throws IOException {
  FileStatus[] dirPaths = fsView.listStatus(new Path("/data"));
  FileStatus fs;
  Assert.assertEquals(0, dirPaths.length);
  // Create a file; it must be listed as a file of the right size.
  long len = fileSystemTestHelper.createFile(fsView, "/data/foo");
  dirPaths = fsView.listStatus(new Path("/data"));
  Assert.assertEquals(1, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
  Assert.assertNotNull(fs);
  // "shoudl" typo fixed in the assertion messages below.
  Assert.assertTrue("Created file should appear as a file", fs.isFile());
  Assert.assertEquals(len, fs.getLen());
  // Create a directory; both entries must now be listed with proper types.
  fsView.mkdirs(fileSystemTestHelper.getTestRootPath(fsView, "/data/dirX"));
  dirPaths = fsView.listStatus(new Path("/data"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/data/foo", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created file should appear as a file", fs.isFile());
  fs = fileSystemTestHelper.containsPath(fsView, "/data/dirX", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("Created dir should appear as a dir", fs.isDirectory());
}

InternalCallVerifier EqualityVerifier 
/**
 * addDelegationTokens must skip tokens already present in the passed-in
 * Credentials: after pre-populating half the tokens, a second fetch should
 * return only the remainder.
 */
@Test public void testGetDelegationTokensWithCredentials() throws IOException {
  Credentials credentials = new Credentials();
  // Generic types restored: the extracted text had dropped "<Token<?>>".
  List<Token<?>> delTokens =
      Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));
  int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();
  Assert.assertEquals(expectedTokenCount, delTokens.size());
  // Pre-populate half the tokens into fresh credentials.
  Credentials newCredentials = new Credentials();
  for (int i = 0; i < expectedTokenCount / 2; i++) {
    Token<?> token = delTokens.get(i);
    newCredentials.addToken(token.getService(), token);
  }
  List<Token<?>> delTokens2 =
      Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
  // Only the tokens not already present should be returned.
  Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test "readOps" (e.g. list, listStatus) on internal dirs of the mount
 * table.  These operations should succeed: "/" shows the configured root
 * children, and "/internalDir" contains one internal directory and one
 * mount link.
 */
@Test public void testListOnInternalDirsOfMountTable() throws IOException {
  FileStatus[] dirPaths = fsView.listStatus(new Path("/"));
  FileStatus fs;
  verifyRootChildren(dirPaths);
  // Listing an internal dir.
  dirPaths = fsView.listStatus(new Path("/internalDir"));
  Assert.assertEquals(2, dirPaths.length);
  fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/internalDir2", dirPaths);
  Assert.assertNotNull(fs);
  // Message fixed: internalDir2 is an internal dir checked with
  // isDirectory(); the old text wrongly said "A mount should appear as
  // symlink".
  Assert.assertTrue("InternalDirs should appear as dir", fs.isDirectory());
  fs = fileSystemTestHelper.containsPath(fsView, "/internalDir/linkToDir2", dirPaths);
  Assert.assertNotNull(fs);
  Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
}

InternalCallVerifier EqualityVerifier 
/**
 * This default implementation is for when viewfs has mount points into file
 * systems, such as LocalFs, that do not have delegation tokens.  It should
 * be overridden for mount points into hdfs.
 */
@Test public void testGetDelegationTokens() throws IOException {
  // Generic wildcard restored: the extracted text had dropped "<?>".
  Token<?>[] delTokens = fsView.addDelegationTokens("sanjay", new Credentials());
  Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.length);
}

Class: org.apache.hadoop.fs.viewfs.ViewFsBaseTest

InternalCallVerifier EqualityVerifier 
/**
 * This default implementation is for when viewfs has mount points into file
 * systems, such as LocalFs, that do not have delegation tokens.  It should
 * be overridden for mount points into hdfs.
 */
@Test public void testGetDelegationTokens() throws IOException {
  // Generic types restored: the extracted text had dropped "<Token<?>>".
  List<Token<?>> delTokens = fcView.getDelegationTokens(new Path("/"), "sanjay");
  Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * getAclStatus on an internal (mount-table) directory should report the
 * current user/group as owner, the minimal 555 ACL and no sticky bit.
 */
@Test public void testInternalGetAclStatus() throws IOException {
  final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
  // Argument order fixed to JUnit's (expected, actual) convention so
  // failure messages read correctly.
  assertEquals(currentUser.getUserName(), aclStatus.getOwner());
  assertEquals(currentUser.getGroupNames()[0], aclStatus.getGroup());
  assertEquals(AclUtil.getMinimalAcl(PERMISSION_555), aclStatus.getEntries());
  assertFalse(aclStatus.isStickyBit());
}

InternalCallVerifier EqualityVerifier 
/**
 * Each mount entry of the view must resolve to its target path under
 * targetTestRoot.
 */
@Test public void testResolvePathMountPoints() throws IOException {
  final String[][] links = {
      {"/user", "user"},
      {"/data", "data"},
      {"/internalDir/linkToDir2", "dir2"},
      {"/internalDir/internalDir2/linkToDir3", "dir3"},
  };
  for (String[] link : links) {
    Assert.assertEquals(new Path(targetTestRoot, link[1]),
        fcView.resolvePath(new Path(link[0])));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * getLinkTarget on each mount link must return the physical target path
 * under targetTestRoot.
 */
@Test public void testSymlinkTarget() throws IOException {
  // Argument order fixed to JUnit's (expected, actual) convention; the
  // original passed the actual value first.
  final String[][] links = {
      {"/user", "user"},
      {"/data", "data"},
      {"/internalDir/linkToDir2", "dir2"},
      {"/internalDir/internalDir2/linkToDir3", "dir3"},
      {"/linkToAFile", "aFile"},
  };
  for (String[] link : links) {
    Assert.assertEquals(new Path(targetTestRoot, link[1]),
        fcView.getLinkTarget(new Path(link[0])));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * The mount table configured by this fixture defines 8 mount points; the
 * view must expose exactly that many.
 */
@Test public void testGetMountPoints() {
  final ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
  final MountPoint[] points = viewfs.getMountPoints();
  Assert.assertEquals(8, points.length);
}

InternalCallVerifier EqualityVerifier 
/**
 * Files and directories created through the view must resolve, via
 * resolvePath(), to the corresponding physical path under targetTestRoot.
 */
@Test public void testResolvePathThroughMountPoints() throws IOException {
  fileContextTestHelper.createFile(fcView, "/user/foo");
  Assert.assertEquals(new Path(targetTestRoot, "user/foo"),
      fcView.resolvePath(new Path("/user/foo")));
  // Nested directories resolve the same way.
  for (String dir : new String[] {"/user/dirX", "/user/dirX/dirY"}) {
    fcView.mkdir(fileContextTestHelper.getTestRootPath(fcView, dir),
        FileContext.DEFAULT_PERM, false);
    Assert.assertEquals(new Path(targetTestRoot, dir.substring(1)),
        fcView.resolvePath(new Path(dir)));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Paths that live in the mount table itself (internal dirs) resolve to
 * themselves rather than to a target file system path.
 */
@Test public void testResolvePathInternalPaths() throws IOException {
  for (String p : new String[] {"/", "/internalDir"}) {
    Assert.assertEquals(new Path(p), fcView.resolvePath(new Path(p)));
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Basic path sanity for the view FileContext: URI, working directory, home
 * directory and makeQualified all agree with the viewfs scheme.
 */
@Test public void testBasicPaths() {
  Assert.assertEquals(FsConstants.VIEWFS_URI,
      fcView.getDefaultFileSystem().getUri());
  final Path userDir = new Path("/user/" + System.getProperty("user.name"));
  Assert.assertEquals(fcView.makeQualified(userDir),
      fcView.getWorkingDirectory());
  Assert.assertEquals(fcView.makeQualified(userDir),
      fcView.getHomeDirectory());
  Assert.assertEquals(
      new Path("/foo/bar").makeQualified(FsConstants.VIEWFS_URI, null),
      fcView.makeQualified(new Path("/foo/bar")));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Listing "/" through the view must show 7 entries: mount links (/user,
// /data, /danglingLink, /linkToAFile) as symlinks and /internalDir as a
// directory; listing "/internalDir" shows one internal dir and one symlink.
/** * Test "readOps" (e.g. list, listStatus) * on internal dirs of mount table * These operations should succeed. */ @Test public void testListOnInternalDirsOfMountTable() throws IOException { FileStatus[] dirPaths=fcView.util().listStatus(new Path("/")); FileStatus fs; Assert.assertEquals(7,dirPaths.length); fs=fileContextTestHelper.containsPath(fcView,"/user",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("A mount should appear as symlink",fs.isSymlink()); fs=fileContextTestHelper.containsPath(fcView,"/data",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("A mount should appear as symlink",fs.isSymlink()); fs=fileContextTestHelper.containsPath(fcView,"/internalDir",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory()); fs=fileContextTestHelper.containsPath(fcView,"/danglingLink",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("A mount should appear as symlink",fs.isSymlink()); fs=fileContextTestHelper.containsPath(fcView,"/linkToAFile",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("A mount should appear as symlink",fs.isSymlink()); dirPaths=fcView.util().listStatus(new Path("/internalDir")); Assert.assertEquals(2,dirPaths.length); fs=fileContextTestHelper.containsPath(fcView,"/internalDir/internalDir2",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("InternalDirs should appear as dir",fs.isDirectory()); fs=fileContextTestHelper.containsPath(fcView,"/internalDir/linkToDir2",dirPaths); Assert.assertNotNull(fs); Assert.assertTrue("A mount should appear as symlink",fs.isSymlink()); }

InternalCallVerifier EqualityVerifier 
// FileContext flavour of the block-locations test: creates a 10x1024-byte
// file in the target FS and compares block locations fetched via the view
// against the target (expected count depends on SupportsBlocks).
// NOTE(review): the second fcView.getFileBlockLocations(...) result is
// discarded, so the second compareBLs() re-checks the original viewBL against
// freshly fetched target locations — confirm this is intentional.
@Test public void testGetBlockLocations() throws IOException { Path targetFilePath=new Path(targetTestRoot,"data/largeFile"); FileContextTestHelper.createFile(fcTarget,targetFilePath,10,1024); Path viewFilePath=new Path("/data/largeFile"); checkFileStatus(fcView,viewFilePath.toString(),fileType.isFile); BlockLocation[] viewBL=fcView.getFileBlockLocations(viewFilePath,0,10240 + 100); Assert.assertEquals(SupportsBlocks ? 10 : 1,viewBL.length); BlockLocation[] targetBL=fcTarget.getFileBlockLocations(targetFilePath,0,10240 + 100); compareBLs(viewBL,targetBL); fcView.getFileBlockLocations(viewFilePath,0,10240 + 100); targetBL=fcTarget.getFileBlockLocations(targetFilePath,0,10240 + 100); compareBLs(viewBL,targetBL); }

Class: org.apache.hadoop.ha.TestActiveStandbyElector

UtilityVerifier EqualityVerifier HybridVerifier 
// Drives ActiveStandbyElector.getActiveData() through three mocked ZK
// behaviours: data present (returned as-is), NoNodeException (mapped to
// ActiveNotFoundException) and AuthFailedException (propagated to the
// caller).  The Mockito.verify(times(n)) counts accumulate across phases.
/** * verify that receiveActiveData gives data when active exists, tells that * active does not exist and reports error in getting active information * @throws IOException * @throws InterruptedException * @throws KeeperException * @throws ActiveNotFoundException */ @Test public void testGetActiveData() throws ActiveNotFoundException, KeeperException, InterruptedException, IOException { byte[] data=new byte[8]; Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenReturn(data); Assert.assertEquals(data,elector.getActiveData()); Mockito.verify(mockZK,Mockito.times(1)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject()); Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenThrow(new KeeperException.NoNodeException()); try { elector.getActiveData(); Assert.fail("ActiveNotFoundException expected"); } catch ( ActiveNotFoundException e) { Mockito.verify(mockZK,Mockito.times(2)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject()); } try { Mockito.when(mockZK.getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject())).thenThrow(new KeeperException.AuthFailedException()); elector.getActiveData(); Assert.fail("KeeperException.AuthFailedException expected"); } catch ( KeeperException.AuthFailedException ke) { Mockito.verify(mockZK,Mockito.times(3)).getData(Mockito.eq(ZK_LOCK_NAME),Mockito.eq(false),Mockito.anyObject()); } }

EqualityVerifier 
/**
 * Quitting the election must close the ZK connection and leave no watches;
 * a subsequent joinElection creates a fresh connection (count == 2) and
 * runs a new election round ending in standby.
 */
@Test public void testQuitElection() throws Exception {
  elector.joinElection(data);
  Mockito.verify(mockZK, Mockito.times(0)).close();
  elector.quitElection(true);
  Mockito.verify(mockZK, Mockito.times(1)).close();
  verifyExistCall(0);
  // Renamed from "data": the original local shadowed the instance field of
  // the same name that the first joinElection() call above uses.
  byte[] rejoinData = new byte[8];
  elector.joinElection(rejoinData);
  Assert.assertEquals(2, count);
  elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK, ZK_LOCK_NAME);
  Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
  verifyExistCall(1);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// When becomeActive() throws, the elector must re-attempt the election:
// a second create() on the lock znode, a new election attempt (count == 2),
// and a non-zero sleptFor showing it backed off before rejoining.
/** * Verify that, when the callback fails to enter active state, * the elector rejoins the election after sleeping for a short period. */ @Test public void testFailToBecomeActive() throws Exception { mockNoPriorActive(); elector.joinElection(data); Assert.assertEquals(0,elector.sleptFor); Mockito.doThrow(new ServiceFailedException("failed to become active")).when(mockApp).becomeActive(); elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockApp).becomeActive(); Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); Assert.assertEquals(2,count); Assert.assertTrue(elector.sleptFor > 0); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Same as testFailToBecomeActive, but the failure path goes through a ZK
// CONNECTIONLOSS and the StatCallback: the elector re-creates the znode,
// confirms ownership by matching the ephemeral owner with its session id,
// then rejoins (third create, count == 2, sleptFor > 0) after
// becomeActive() throws.
/** * Verify that, when the callback fails to enter active state, after * a ZK disconnect (i.e from the StatCallback), that the elector rejoins * the election after sleeping for a short period. */ @Test public void testFailToBecomeActiveAfterZKDisconnect() throws Exception { mockNoPriorActive(); elector.joinElection(data); Assert.assertEquals(0,elector.sleptFor); elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); verifyExistCall(1); Stat stat=new Stat(); stat.setEphemeralOwner(1L); Mockito.when(mockZK.getSessionId()).thenReturn(1L); Mockito.doThrow(new ServiceFailedException("fail to become active")).when(mockApp).becomeActive(); elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,stat); Mockito.verify(mockApp,Mockito.times(1)).becomeActive(); Mockito.verify(mockZK,Mockito.times(3)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); Assert.assertEquals(2,count); Assert.assertTrue(elector.sleptFor > 0); }

EqualityVerifier 
// Watcher events of type None: SyncConnected while connected is a no-op;
// Disconnected triggers enterNeutralMode; a later SyncConnected re-checks
// the znode; Expired restarts the election (count == 2, second create);
// AuthFailed is reported as a fatal error.
/** * verify behavior of watcher.process callback with non-node event */ @Test public void testProcessCallbackEventNone() throws Exception { mockNoPriorActive(); elector.joinElection(data); WatchedEvent mockEvent=Mockito.mock(WatchedEvent.class); Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.None); Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.SyncConnected); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockZK,Mockito.times(0)).exists(Mockito.anyString(),Mockito.anyBoolean(),Mockito.anyObject(),Mockito.anyObject()); Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.Disconnected); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockApp,Mockito.times(1)).enterNeutralMode(); Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.SyncConnected); elector.processWatchEvent(mockZK,mockEvent); verifyExistCall(1); Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.Expired); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockApp,Mockito.times(1)).enterNeutralMode(); Assert.assertEquals(2,count); Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockApp,Mockito.times(1)).becomeActive(); verifyExistCall(2); Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.AuthFailed); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockApp,Mockito.times(1)).notifyFatalError("Unexpected Zookeeper watch event state: AuthFailed"); Mockito.verify(mockApp,Mockito.times(1)).enterNeutralMode(); }

EqualityVerifier 
// Node events on the lock znode: data/children changes re-arm the exists
// watch; NodeDeleted while standby re-joins the election and becomes
// active; NodeDeleted while active re-elects; an event with a null path is
// reported as a fatal watch error.
/** * verify behavior of watcher.process with node event */ @Test public void testProcessCallbackEventNode() throws Exception { mockNoPriorActive(); elector.joinElection(data); elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockApp,Mockito.times(1)).becomeStandby(); verifyExistCall(1); WatchedEvent mockEvent=Mockito.mock(WatchedEvent.class); Mockito.when(mockEvent.getPath()).thenReturn(ZK_LOCK_NAME); Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeDataChanged); elector.processWatchEvent(mockZK,mockEvent); verifyExistCall(2); Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeChildrenChanged); elector.processWatchEvent(mockZK,mockEvent); verifyExistCall(3); Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeDeleted); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockApp,Mockito.times(0)).enterNeutralMode(); Mockito.verify(mockZK,Mockito.times(2)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockApp,Mockito.times(1)).becomeActive(); verifyExistCall(4); Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.NodeDeleted); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockApp,Mockito.times(1)).enterNeutralMode(); Mockito.verify(mockZK,Mockito.times(3)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockApp,Mockito.times(2)).becomeActive(); verifyExistCall(5); Mockito.when(mockEvent.getPath()).thenReturn(null); elector.processWatchEvent(mockZK,mockEvent); Mockito.verify(mockApp,Mockito.times(1)).notifyFatalError("Unexpected watch error from Zookeeper"); Assert.assertEquals(1,count); verifyExistCall(5); }

EqualityVerifier 
// CONNECTIONLOSS during znode create is retried a bounded number of times;
// once the limit is exceeded notifyFatalError is raised.  On rejoin, a
// NODEEXISTS result followed by a Stat whose ephemeral owner matches this
// client's session id makes the elector active without another exists().
/** * verify that retry of network errors verifies master by session id and * becomes active if they match. monitoring is started. */ @Test public void testCreateNodeResultRetryBecomeActive() throws Exception { mockNoPriorActive(); elector.joinElection(data); elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); Mockito.verify(mockApp,Mockito.times(1)).notifyFatalError("Received create error from Zookeeper. code:CONNECTIONLOSS " + "for path " + ZK_LOCK_NAME + ". "+ "Not retrying further znode create connection errors."); elector.joinElection(data); Assert.assertEquals(2,count); elector.processResult(Code.CONNECTIONLOSS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); elector.processResult(Code.NODEEXISTS.intValue(),ZK_LOCK_NAME,mockZK,ZK_LOCK_NAME); verifyExistCall(1); Stat stat=new Stat(); stat.setEphemeralOwner(1L); Mockito.when(mockZK.getSessionId()).thenReturn(1L); elector.processResult(Code.OK.intValue(),ZK_LOCK_NAME,mockZK,stat); Mockito.verify(mockApp,Mockito.times(1)).becomeActive(); verifyExistCall(1); Mockito.verify(mockZK,Mockito.times(6)).create(ZK_LOCK_NAME,data,Ids.OPEN_ACL_UNSAFE,CreateMode.EPHEMERAL,elector,mockZK); }

Class: org.apache.hadoop.ha.TestFailoverController

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A failover target whose health check throws must abort the failover,
 * leaving both services in their original states.
 */
@Test public void testFailoverToUnhealthyServiceFailsAndFailsback() throws Exception {
  DummyHAService active = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService standby = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new HealthCheckFailedException("Failed!"))
      .when(standby.proxy).monitorHealth();
  active.fencer = standby.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(active, standby, false, false);
    fail("Failover to unhealthy service");
  } catch (FailoverFailedException ffe) {
    // expected: the target's health check failed
  }
  assertEquals(HAServiceState.ACTIVE, active.state);
  assertEquals(HAServiceState.STANDBY, standby.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * The active side is unreachable (its proxy throws IOException on every
 * call) but a fencer is configured, so the failover must still succeed and
 * make svc2 active.  The graceful-fence proxy must be fetched using the
 * graceful fence timeout.
 */
@Test public void testFailoverFromNonExistantServiceWithFencer() throws Exception {
  DummyHAService svc1 = spy(new DummyHAService(null, svc1Addr));
  // Mock proxy that fails every RPC but can still be close()d.
  HAServiceProtocol errorThrowingProxy = Mockito.mock(HAServiceProtocol.class,
      Mockito.withSettings()
          .defaultAnswer(new ThrowsException(new IOException("Could not connect to host")))
          .extraInterfaces(Closeable.class));
  Mockito.doNothing().when((Closeable) errorThrowingProxy).close();
  Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(Mockito.any(), Mockito.anyInt());
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
  } catch (FailoverFailedException ffe) {
    // Spelling fixed ("existant" -> "existent") in the failure message.
    fail("Non-existent active prevented failover");
  }
  Mockito.verify(svc1).getProxy(Mockito.any(),
      Mockito.eq(CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
// svc2 refuses to become active, so the failover must fail and then fail
// back cleanly: svc1 is transitioned to standby and then back to active.
// NOTE(review): the fail() message "Failover to already active service"
// looks copy-pasted from another test — the scenario here is a faulty
// target, not an already-active one; confirm against upstream.
@Test public void testFailoverToFaultyServiceFailsbackOK() throws Exception { DummyHAService svc1=spy(new DummyHAService(HAServiceState.ACTIVE,svc1Addr)); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo()); svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName()); try { doFailover(svc1,svc2,false,false); fail("Failover to already active service"); } catch ( FailoverFailedException ffe) { } verify(svc1.proxy).transitionToStandby(anyReqInfo()); verify(svc1.proxy).transitionToActive(anyReqInfo()); assertEquals(HAServiceState.ACTIVE,svc1.state); assertEquals(HAServiceState.STANDBY,svc2.state); }

EqualityVerifier 
/**
 * A healthy failover followed by a failback: fencing must never be invoked
 * and the active role swaps each time.
 */
@Test public void testFailoverAndFailback() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  // Failover: svc1 -> svc2.
  AlwaysSucceedFencer.fenceCalled = 0;
  doFailover(svc1, svc2, false, false);
  assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(HAServiceState.ACTIVE, svc2.state);

  // Failback: svc2 -> svc1.
  AlwaysSucceedFencer.fenceCalled = 0;
  doFailover(svc2, svc1, false, false);
  assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// svc2 cannot become active and the (always-failing) fencer cannot make the
// failback safe, so the failback fails: svc1 stays standby and svc2 — the
// half-failed-over target — is the service that gets fenced.
@Test public void testFailureToFenceOnFailbackFailsTheFailback() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); Mockito.doThrow(new IOException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo()); svc1.fencer=svc2.fencer=setupFencer(AlwaysFailFencer.class.getName()); AlwaysFailFencer.fenceCalled=0; try { doFailover(svc1,svc2,false,false); fail("Failed over to service that won't transition to active"); } catch ( FailoverFailedException ffe) { } assertEquals(HAServiceState.STANDBY,svc1.state); assertEquals(1,AlwaysFailFencer.fenceCalled); assertSame(svc2,AlwaysFailFencer.fencedSvc); }

UtilityVerifier EqualityVerifier HybridVerifier 
// With forceFence requested, svc1 is fenced during the failover; once
// fenced it must NOT be failed back to, even though svc2 fails to become
// active — both services end up standby.
@Test public void testWeDontFailbackIfActiveWasFenced() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo()); svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName()); try { doFailover(svc1,svc2,true,false); fail("Failed over to service that won't transition to active"); } catch ( FailoverFailedException ffe) { } assertEquals(HAServiceState.STANDBY,svc1.state); assertEquals(HAServiceState.STANDBY,svc2.state); }

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Fencing was explicitly requested but the fencer always fails, so the
// failover must abort: svc1 (the source) is the fence target and neither
// service becomes active.
@Test public void testFencingFailureDuringFailover() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); svc1.fencer=svc2.fencer=setupFencer(AlwaysFailFencer.class.getName()); AlwaysFailFencer.fenceCalled=0; try { doFailover(svc1,svc2,true,false); fail("Failed over even though fencing requested and failed"); } catch ( FailoverFailedException ffe) { } assertEquals(1,AlwaysFailFencer.fenceCalled); assertSame(svc1,AlwaysFailFencer.fencedSvc); assertEquals(HAServiceState.STANDBY,svc1.state); assertEquals(HAServiceState.STANDBY,svc2.state); }

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * A failover target that cannot even be reached (getProxy throws) must
 * abort the failover and leave the original active service untouched.
 */
@Test public void testFailoverToNonExistantServiceFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = spy(new DummyHAService(null, svc2Addr));
  Mockito.doThrow(new IOException("Failed to connect"))
      .when(svc2).getProxy(Mockito.any(), Mockito.anyInt());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  try {
    doFailover(svc1, svc2, false, false);
    // Spelling fixed ("existant" -> "existent") in the failure message.
    fail("Failed over to a non-existent standby");
  } catch (FailoverFailedException ffe) {
    // expected
  }
  assertEquals(HAServiceState.ACTIVE, svc1.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// svc1 fails transitionToStandby and the fencer also fails, so the failover
// must abort: svc1 is the fence target and keeps its ACTIVE state while
// svc2 remains standby.
@Test public void testFailoverFromFaultyServiceFencingFailure() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc1.proxy).transitionToStandby(anyReqInfo()); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); svc1.fencer=svc2.fencer=setupFencer(AlwaysFailFencer.class.getName()); AlwaysFailFencer.fenceCalled=0; try { doFailover(svc1,svc2,false,false); fail("Failed over even though fencing failed"); } catch ( FailoverFailedException ffe) { } assertEquals(1,AlwaysFailFencer.fenceCalled); assertSame(svc1,AlwaysFailFencer.fencedSvc); assertEquals(HAServiceState.ACTIVE,svc1.state); assertEquals(HAServiceState.STANDBY,svc2.state); }

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// svc2 fails transitionToActive, so before failing back the controller
// must fence svc2 (the half-failed-over target); svc1 returns to active
// and the succeeding fencer records exactly one fence of svc2.
@Test public void testWeFenceOnFailbackIfTransitionToActiveFails() throws Exception { DummyHAService svc1=new DummyHAService(HAServiceState.ACTIVE,svc1Addr); DummyHAService svc2=new DummyHAService(HAServiceState.STANDBY,svc2Addr); Mockito.doThrow(new ServiceFailedException("Failed!")).when(svc2.proxy).transitionToActive(anyReqInfo()); svc1.fencer=svc2.fencer=setupFencer(AlwaysSucceedFencer.class.getName()); AlwaysSucceedFencer.fenceCalled=0; try { doFailover(svc1,svc2,false,false); fail("Failed over to service that won't transition to active"); } catch ( FailoverFailedException ffe) { } assertEquals(HAServiceState.ACTIVE,svc1.state); assertEquals(1,AlwaysSucceedFencer.fenceCalled); assertSame(svc2,AlwaysSucceedFencer.fencedSvc); }

EqualityVerifier 
/**
 * A failover initiated from a standby to another standby should still
 * succeed, leaving the target active.
 */
@Test public void testFailoverFromStandbyToStandby() throws Exception {
  final DummyHAService from = new DummyHAService(HAServiceState.STANDBY, svc1Addr);
  final DummyHAService to = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  from.fencer = to.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  doFailover(from, to, false, false);
  assertEquals(HAServiceState.STANDBY, from.state);
  assertEquals(HAServiceState.ACTIVE, to.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Failing over to a target that reports itself not ready is rejected,
 * unless the caller forces it (forceActive = true).
 */
@Test
public void testFailoverToUnreadyService() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doReturn(STATE_NOT_READY).when(svc2.proxy).getServiceStatus();
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
    fail("Can't failover to a service that's not ready");
  } catch (FailoverFailedException ffe) {
    // The failure must stem from the injected "not ready" status;
    // anything else is a real error.
    if (!ffe.getMessage().contains("injected not ready")) {
      throw ffe;
    }
  }
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);

  // Forcing active bypasses the readiness check.
  doFailover(svc1, svc2, false, true);
  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * When the failover target fails to become active AND the subsequent
 * failback to the original active also fails, the failover as a whole
 * must fail, leaving both services in standby.
 */
@Test
public void testFailbackToFaultyServiceFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  Mockito.doThrow(new ServiceFailedException("Failed!"))
      .when(svc1.proxy).transitionToActive(anyReqInfo());
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  Mockito.doThrow(new ServiceFailedException("Failed!"))
      .when(svc2.proxy).transitionToActive(anyReqInfo());
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
    // Fix: the old message ("Failover to already active service") was copied
    // from testFailoverFromActiveToActive and described a different scenario.
    fail("Failover succeeded even though both failover and failback "
        + "targets were faulty");
  } catch (FailoverFailedException ffe) {
    // Expected
  }

  assertEquals(HAServiceState.STANDBY, svc1.state);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Failing over from a service to itself must be rejected without any
 * fencing, for both an active and a standby service.
 */
@Test
public void testSelfFailoverFails() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  AlwaysSucceedFencer.fenceCalled = 0;

  // Active service, failover to itself.
  try {
    doFailover(svc1, svc1, false, false);
    fail("Can't failover to yourself");
  } catch (FailoverFailedException ffe) {
    // Expected
  }
  assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
  assertEquals(HAServiceState.ACTIVE, svc1.state);

  // Standby service, failover to itself.
  try {
    doFailover(svc2, svc2, false, false);
    fail("Can't failover to yourself");
  } catch (FailoverFailedException ffe) {
    // Expected
  }
  assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
  assertEquals(HAServiceState.STANDBY, svc2.state);
}

UtilityVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * A faulty active that cannot transition to standby must not block the
 * failover: it gets fenced instead, and the target becomes active.
 */
@Test
public void testFailoverFromFaultyServiceSucceeds() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  Mockito.doThrow(new ServiceFailedException("Failed!"))
      .when(svc1.proxy).transitionToStandby(anyReqInfo());
  DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
  AlwaysSucceedFencer.fenceCalled = 0;

  try {
    doFailover(svc1, svc2, false, false);
  } catch (FailoverFailedException ffe) {
    fail("Faulty active prevented failover");
  }

  // The old active was fenced exactly once.
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(svc1, AlwaysSucceedFencer.fencedSvc);
  // svc1 never managed to leave ACTIVE; svc2 took over anyway.
  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Failing over to an already-active service is rejected, and both
 * services keep their (active) state.
 */
@Test
public void testFailoverFromActiveToActive() throws Exception {
  DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
  DummyHAService svc2 = new DummyHAService(HAServiceState.ACTIVE, svc2Addr);
  svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  try {
    doFailover(svc1, svc2, false, false);
    fail("Can't failover to an already active service");
  } catch (FailoverFailedException ffe) {
    // Expected
  }

  assertEquals(HAServiceState.ACTIVE, svc1.state);
  assertEquals(HAServiceState.ACTIVE, svc2.state);
}

Class: org.apache.hadoop.ha.TestHAAdmin

EqualityVerifier 
/**
 * Exercise HAAdmin usage/error paths: no arguments, bad commands, and
 * commands invoked with the wrong number of arguments all return -1 and
 * print an appropriate message.
 */
@Test
public void testAdminUsage() throws Exception {
  assertEquals(-1, runTool());
  assertOutputContains("Usage:");
  assertOutputContains("-transitionToActive");

  assertEquals(-1, runTool("badCommand"));
  assertOutputContains("Bad command 'badCommand'");

  assertEquals(-1, runTool("-badCommand"));
  assertOutputContains("badCommand: Unknown");

  assertEquals(-1, runTool("-transitionToActive"));
  assertOutputContains("transitionToActive: incorrect number of arguments");
  assertEquals(-1, runTool("-transitionToActive", "x", "y"));
  assertOutputContains("transitionToActive: incorrect number of arguments");

  assertEquals(-1, runTool("-failover"));
  // Fix: the original asserted this same output twice in a row; the
  // duplicate added no coverage and was removed.
  assertOutputContains("failover: incorrect arguments");
  assertEquals(-1, runTool("-failover", "foo:1234"));
  assertOutputContains("failover: incorrect arguments");
}

EqualityVerifier 
/**
 * -help exits 0 both bare and with a specific command, and prints that
 * command's description.
 */
@Test
public void testHelp() throws Exception {
  assertEquals(0, runTool("-help"));
  assertEquals(0, runTool("-help", "transitionToActive"));
  assertOutputContains("Transitions the service into Active");
}

Class: org.apache.hadoop.ha.TestNodeFencer

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Blank lines, leading whitespace, and '#' comments in the fencer
 * configuration are tolerated; both configured fencers still run and
 * each receives its own argument.
 */
@Test
public void testWhitespaceAndCommentsInConfig()
    throws BadFencingConfigurationException {
  NodeFencer fencer = setupFencer(
      "\n"
      + " # the next one will always fail\n"
      + " " + AlwaysFailFencer.class.getName() + "(foo) # <- fails\n"
      + AlwaysSucceedFencer.class.getName() + "(bar) \n");

  assertTrue(fencer.fence(MOCK_TARGET));
  // The failing fencer ran first with "foo"...
  assertEquals(1, AlwaysFailFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysFailFencer.fencedSvc);
  assertEquals("foo", AlwaysFailFencer.callArgs.get(0));
  // ...then the succeeding fencer ran with "bar".
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
  assertEquals("bar", AlwaysSucceedFencer.callArgs.get(0));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With two fencers configured, fencing stops once the first succeeds:
 * exactly one invocation is recorded, carrying the first fencer's
 * argument.
 */
@Test
public void testMultipleFencers() throws BadFencingConfigurationException {
  NodeFencer fencer = setupFencer(
      AlwaysSucceedFencer.class.getName() + "(foo)\n"
      + AlwaysSucceedFencer.class.getName() + "(bar)\n");

  assertTrue(fencer.fence(MOCK_TARGET));
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
}

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * A fencer configured without an argument list is invoked once with a
 * null argument.
 */
@Test
public void testArglessFencer() throws BadFencingConfigurationException {
  NodeFencer fencer = setupFencer(AlwaysSucceedFencer.class.getName());

  assertTrue(fencer.fence(MOCK_TARGET));
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
  assertEquals(null, AlwaysSucceedFencer.callArgs.get(0));
}

BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * A single fencer with a "(foo)" argument runs once against the target
 * and receives that argument.
 */
@Test
public void testSingleFencer() throws BadFencingConfigurationException {
  NodeFencer fencer =
      setupFencer(AlwaysSucceedFencer.class.getName() + "(foo)");

  assertTrue(fencer.fence(MOCK_TARGET));
  assertEquals(1, AlwaysSucceedFencer.fenceCalled);
  assertSame(MOCK_TARGET, AlwaysSucceedFencer.fencedSvc);
  assertEquals("foo", AlwaysSucceedFencer.callArgs.get(0));
}

Class: org.apache.hadoop.ha.TestShellCommandFencer

EqualityVerifier 
/**
 * abbreviate() shortens over-long commands to the requested width with a
 * "..." infix, and leaves commands at or under the width untouched.
 */
@Test
public void testCommandAbbreviation() {
  // Over / at / under the limit for a 6-char input.
  assertEquals("a...f", ShellCommandFencer.abbreviate("abcdef", 5));
  assertEquals("abcdef", ShellCommandFencer.abbreviate("abcdef", 6));
  assertEquals("abcdef", ShellCommandFencer.abbreviate("abcdef", 7));
  // Longer inputs at assorted widths.
  assertEquals("a...g", ShellCommandFencer.abbreviate("abcdefg", 5));
  assertEquals("a...h", ShellCommandFencer.abbreviate("abcdefgh", 5));
  assertEquals("a...gh", ShellCommandFencer.abbreviate("abcdefgh", 6));
  assertEquals("ab...gh", ShellCommandFencer.abbreviate("abcdefgh", 7));
}

Class: org.apache.hadoop.ha.TestSshFenceByTcpPort

EqualityVerifier 
/**
 * SshFenceByTcpPort.Args parsing of "[user][:port]", defaulting to the
 * current user and port 22 when a piece is omitted.
 */
@Test
public void testArgsParsing() throws BadFencingConfigurationException {
  // null and empty -> all defaults.
  Args args = new SshFenceByTcpPort.Args(null);
  assertEquals(System.getProperty("user.name"), args.user);
  assertEquals(22, args.sshPort);

  args = new SshFenceByTcpPort.Args("");
  assertEquals(System.getProperty("user.name"), args.user);
  assertEquals(22, args.sshPort);

  // A bare token is a user name, even when it looks numeric.
  args = new SshFenceByTcpPort.Args("12345");
  assertEquals("12345", args.user);
  assertEquals(22, args.sshPort);

  // ":port" keeps the default user.
  args = new SshFenceByTcpPort.Args(":12345");
  assertEquals(System.getProperty("user.name"), args.user);
  assertEquals(12345, args.sshPort);

  // Full "user:port" form.
  args = new SshFenceByTcpPort.Args("foo:2222");
  assertEquals("foo", args.user);
  assertEquals(2222, args.sshPort);
}

Class: org.apache.hadoop.ha.TestZKFailoverController

InternalCallVerifier EqualityVerifier 
/**
 * Test that the various command lines for formatting the ZK directory
 * function correctly.
 */
@Test(timeout=15000)
public void testFormatZK() throws Exception {
  DummyHAService svc = cluster.getService(1);

  // Running before formatting fails with "no parent znode".
  assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE, runFC(svc));
  // Formatting creates the parent znode.
  assertEquals(0, runFC(svc, "-formatZK"));
  // Reformatting non-interactively without -force is denied...
  assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
      runFC(svc, "-formatZK", "-nonInteractive"));
  // ...but -force allows it.
  assertEquals(0, runFC(svc, "-formatZK", "-force"));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that automatic failover won't run against a target that hasn't
 * explicitly enabled the feature.
 */
@Test(timeout=10000)
public void testWontRunWhenAutoFailoverDisabled() throws Exception {
  DummyHAService svc = cluster.getService(1);
  svc = Mockito.spy(svc);
  Mockito.doReturn(false).when(svc).isAutoFailoverEnabled();

  // Both formatting and a normal run are refused.
  assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
      runFC(svc, "-formatZK"));
  assertEquals(ZKFailoverController.ERR_CODE_AUTO_FAILOVER_NOT_ENABLED,
      runFC(svc));
}

InternalCallVerifier EqualityVerifier 
/**
 * Test that the ZKFC won't run if fencing is not configured for the
 * local service.
 */
@Test(timeout=15000)
public void testFencingMustBeConfigured() throws Exception {
  DummyHAService svc = Mockito.spy(cluster.getService(0));
  Mockito.doThrow(new BadFencingConfigurationException("no fencing"))
      .when(svc).checkFencingConfigured();

  // Formatting ZK is still allowed...
  assertEquals(0, runFC(svc, "-formatZK"));
  // ...but a normal run refuses to start without a fencer.
  assertEquals(ZKFailoverController.ERR_CODE_NO_FENCER, runFC(svc));
}

InternalCallVerifier EqualityVerifier 
// Two graceful failovers (0 -> 1 -> 0) driven through the ZKFC RPC proxy.
// Afterwards neither node was fenced, and the active-transition counters
// show node 0 became active twice (initial + failback) and node 1 once.
// NOTE(review): the 10s sleep appears intended to let any erroneous async
// fencing fire before the fenceCount asserts -- confirm it is still needed.
@Test(timeout=25000) public void testGracefulFailover() throws Exception { try { cluster.start(); cluster.waitForActiveLockHolder(0); cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover(); cluster.waitForActiveLockHolder(1); cluster.getService(0).getZKFCProxy(conf,5000).gracefulFailover(); cluster.waitForActiveLockHolder(0); Thread.sleep(10000); assertEquals(0,cluster.getService(0).fenceCount); assertEquals(0,cluster.getService(1).fenceCount); assertEquals(2,cluster.getService(0).activeTransitionCount); assertEquals(1,cluster.getService(1).activeTransitionCount); } finally { cluster.stop(); } }

InternalCallVerifier EqualityVerifier 
/**
 * During a graceful failover, if the old active (node 0) fails to become
 * standby, it must be fenced exactly once.
 */
@Test(timeout=15000)
public void testGracefulFailoverFailBecomingStandby() throws Exception {
  try {
    cluster.start();
    cluster.waitForActiveLockHolder(0);

    // Rig node 0 to fail its transition to standby.
    cluster.setFailToBecomeStandby(0, true);
    cluster.getService(1).getZKFCProxy(conf, 5000).gracefulFailover();

    assertEquals(1, cluster.getService(0).fenceCount);
  } finally {
    cluster.stop();
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Formatting the znode scope of one cluster must not create or disturb
 * the scope of another cluster sharing the same parent znode.
 */
@Test
public void testFormatOneClusterLeavesOtherClustersAlone() throws Exception {
  DummyHAService svc = cluster.getService(1);
  // A ZKFC living under a different scope inside the parent znode.
  DummyZKFC zkfcInOtherCluster = new DummyZKFC(conf, cluster.getService(1)) {
    @Override
    protected String getScopeInsideParentNode() {
      return "other-scope";
    }
  };

  // Neither scope is formatted yet.
  assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE, runFC(svc));
  assertEquals(0, runFC(svc, "-formatZK"));
  // Formatting the first scope did not create the other one.
  assertEquals(ZKFailoverController.ERR_CODE_NO_PARENT_ZNODE,
      zkfcInOtherCluster.run(new String[]{}));
  assertEquals(0, zkfcInOtherCluster.run(new String[]{"-formatZK"}));
  // The first scope still refuses a non-interactive reformat.
  assertEquals(ZKFailoverController.ERR_CODE_FORMAT_DENIED,
      runFC(svc, "-formatZK", "-nonInteractive"));
}

InternalCallVerifier EqualityVerifier 
/**
 * Test that if ZooKeeper is not running, the correct error
 * code is returned.
 */
@Test(timeout=15000)
public void testNoZK() throws Exception {
  stopServer();
  DummyHAService svc = cluster.getService(1);
  assertEquals(ZKFailoverController.ERR_CODE_NO_ZK, runFC(svc));
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Graceful failover where the target (node 1) is rigged to fail its
// transition to active: gracefulFailover() must surface a
// ServiceFailedException naming the target and the injected failure,
// nobody gets fenced, and node 0 re-acquires the active lock.
@Test(timeout=15000) public void testGracefulFailoverFailBecomingActive() throws Exception { try { cluster.start(); cluster.waitForActiveLockHolder(0); cluster.setFailToBecomeActive(1,true); try { cluster.getService(1).getZKFCProxy(conf,5000).gracefulFailover(); fail("Did not fail to graceful failover when target failed " + "to become active!"); } catch ( ServiceFailedException sfe) { GenericTestUtils.assertExceptionContains("Couldn't make " + cluster.getService(1) + " active",sfe); GenericTestUtils.assertExceptionContains("injected failure",sfe); } assertEquals(0,cluster.getService(0).fenceCount); assertEquals(0,cluster.getService(1).fenceCount); cluster.waitForActiveLockHolder(0); } finally { cluster.stop(); } }

InternalCallVerifier EqualityVerifier 
// NOTE(review): also verifies session continuity -- the ZK session IDs
// captured before the outage are asserted equal to the IDs after the
// server restarts, i.e. both electors reconnected on their old sessions.
/** * Test that, when ZooKeeper fails, the system remains in its * current state, without triggering any failovers, and without * causing the active node to enter standby state. */ @Test(timeout=15000) public void testZooKeeperFailure() throws Exception { try { cluster.start(); long session0=cluster.getElector(0).getZKSessionIdForTests(); long session1=cluster.getElector(1).getZKSessionIdForTests(); LOG.info("====== Stopping ZK server"); stopServer(); waitForServerDown(hostPort,CONNECTION_TIMEOUT); LOG.info("====== Waiting for services to enter NEUTRAL mode"); cluster.waitForElectorState(0,ActiveStandbyElector.State.NEUTRAL); cluster.waitForElectorState(1,ActiveStandbyElector.State.NEUTRAL); LOG.info("====== Checking that the services didn't change HA state"); assertEquals(HAServiceState.ACTIVE,cluster.getService(0).state); assertEquals(HAServiceState.STANDBY,cluster.getService(1).state); LOG.info("====== Restarting server"); startServer(); waitForServerUp(hostPort,CONNECTION_TIMEOUT); cluster.waitForElectorState(0,ActiveStandbyElector.State.ACTIVE); cluster.waitForElectorState(1,ActiveStandbyElector.State.STANDBY); cluster.waitForHAState(0,HAServiceState.ACTIVE); cluster.waitForHAState(1,HAServiceState.STANDBY); assertEquals(session0,cluster.getElector(0).getZKSessionIdForTests()); assertEquals(session1,cluster.getElector(1).getZKSessionIdForTests()); } finally { cluster.stop(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// cedeActive(3000) must return quickly (RPC asserted < 1s), drop the
// elector to INIT, and the node rejoins the election only after the ceded
// period (asserted > 2800 ms elapsed before reaching STANDBY).
// NOTE(review): wall-clock based -- may be flaky on very slow machines.
/** * Test that the ZKFC can gracefully cede its active status. */ @Test(timeout=15000) public void testCedeActive() throws Exception { try { cluster.start(); DummyZKFC zkfc=cluster.getZkfc(0); assertEquals(ActiveStandbyElector.State.ACTIVE,zkfc.getElectorForTests().getStateForTests()); ZKFCProtocol proxy=zkfc.getLocalTarget().getZKFCProxy(conf,5000); long st=Time.now(); proxy.cedeActive(3000); long et=Time.now(); assertTrue("RPC to cedeActive took " + (et - st) + " ms",et - st < 1000); assertEquals(ActiveStandbyElector.State.INIT,zkfc.getElectorForTests().getStateForTests()); cluster.waitForElectorState(0,ActiveStandbyElector.State.STANDBY); long et2=Time.now(); assertTrue("Should take ~3 seconds to rejoin. Only took " + (et2 - et) + "ms before rejoining.",et2 - et > 2800); } finally { cluster.stop(); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that, if ACLs are specified in the configuration, that
 * it sets the ACLs when formatting the parent node: an unauthenticated
 * client must not be able to read the formatted znode.
 */
@Test(timeout=15000)
public void testFormatSetsAcls() throws Exception {
  // Format the base dir, should succeed.
  DummyHAService svc = cluster.getService(1);
  assertEquals(0, runFC(svc, "-formatZK"));

  ZooKeeper otherClient = createClient();
  try {
    // Lacking auth, the other client should not be able to read it.
    Stat stat = new Stat();
    otherClient.getData(ZKFailoverController.ZK_PARENT_ZNODE_DEFAULT,
        false, stat);
    fail("Was able to read data without authenticating!");
  } catch (KeeperException.NoAuthException nae) {
    // expected
  } finally {
    // Fix: the original leaked this ZooKeeper session; close it so it
    // does not linger for the rest of the test run.
    otherClient.close();
  }
}

Class: org.apache.hadoop.hdfs.TestAbandonBlock

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Writes past one block, abandons the last block twice (the second call
// checks that abandoning an already-abandoned block is harmless), then
// verifies after a NameNode restart that the persisted file has exactly
// one block fewer than before the abandon.
@Test public void testAbandonBlock() throws IOException { String src=FILE_NAME_PREFIX + "foo"; FSDataOutputStream fout=fs.create(new Path(src),true,4096,(short)1,512L); for (int i=0; i < 1024; i++) { fout.write(123); } fout.hflush(); long fileId=((DFSOutputStream)fout.getWrappedStream()).getFileId(); DFSClient dfsclient=DFSClientAdapter.getDFSClient(fs); LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(src,0,Integer.MAX_VALUE); int orginalNumBlocks=blocks.locatedBlockCount(); LocatedBlock b=blocks.getLastLocatedBlock(); dfsclient.getNamenode().abandonBlock(b.getBlock(),fileId,src,dfsclient.clientName); dfsclient.getNamenode().abandonBlock(b.getBlock(),fileId,src,dfsclient.clientName); fout.close(); cluster.restartNameNode(); blocks=dfsclient.getNamenode().getBlockLocations(src,0,Integer.MAX_VALUE); Assert.assertEquals("Blocks " + b + " has not been abandoned.",orginalNumBlocks,blocks.locatedBlockCount() + 1); }

Class: org.apache.hadoop.hdfs.TestBalancerBandwidth

InternalCallVerifier EqualityVerifier 
// Balancer bandwidth propagation: both datanodes report the configured
// default, a positive dynamic update (12x) takes effect on both, and a
// subsequent update of 0 is ignored (both still report the 12x value).
// NOTE(review): relies on bare 5s sleeps for propagation -- timing
// sensitive on slow hosts.
@Test public void testBalancerBandwidth() throws Exception { conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,DEFAULT_BANDWIDTH); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build(); try { cluster.waitActive(); DistributedFileSystem fs=cluster.getFileSystem(); ArrayList datanodes=cluster.getDataNodes(); assertEquals(DEFAULT_BANDWIDTH,(long)datanodes.get(0).getBalancerBandwidth()); assertEquals(DEFAULT_BANDWIDTH,(long)datanodes.get(1).getBalancerBandwidth()); long newBandwidth=12 * DEFAULT_BANDWIDTH; fs.setBalancerBandwidth(newBandwidth); try { Thread.sleep(5000); } catch ( Exception e) { } assertEquals(newBandwidth,(long)datanodes.get(0).getBalancerBandwidth()); assertEquals(newBandwidth,(long)datanodes.get(1).getBalancerBandwidth()); fs.setBalancerBandwidth(0); try { Thread.sleep(5000); } catch ( Exception e) { } assertEquals(newBandwidth,(long)datanodes.get(0).getBalancerBandwidth()); assertEquals(newBandwidth,(long)datanodes.get(1).getBalancerBandwidth()); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestBlockReaderFactory

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the declaration "final List" / "locatedBlocks" spanning the
// line break below appears to have lost its generic parameter (likely
// List<LocatedBlock>) during formatting -- verify against the original
// source before editing. The reader thread / semaphore handshake is
// deliberately order-sensitive: sem gates each read round so the main
// thread can interrupt mid-read; the final assert pins replicasCreated
// at 2 (the purged replica plus its replacement).
/** * When an InterruptedException is sent to a thread calling * FileChannel#read, the FileChannel is immediately closed and the * thread gets an exception. This effectively means that we might have * someone asynchronously calling close() on the file descriptors we use * in BlockReaderLocal. So when unreferencing a ShortCircuitReplica in * ShortCircuitCache#unref, we should check if the FileChannel objects * are still open. If not, we should purge the replica to avoid giving * it out to any future readers. * This is a regression test for HDFS-6227: Short circuit read failed * due to ClosedChannelException. * Note that you may still get ClosedChannelException errors if two threads * are reading from the same replica and an InterruptedException is delivered * to one of them. */ @Test(timeout=120000) public void testPurgingClosedReplicas() throws Exception { BlockReaderTestUtil.enableBlockReaderFactoryTracing(); final AtomicInteger replicasCreated=new AtomicInteger(0); final AtomicBoolean testFailed=new AtomicBoolean(false); DFSInputStream.tcpReadsDisabledForTesting=true; BlockReaderFactory.createShortCircuitReplicaInfoCallback=new ShortCircuitCache.ShortCircuitReplicaCreator(){ @Override public ShortCircuitReplicaInfo createShortCircuitReplicaInfo(){ replicasCreated.incrementAndGet(); return null; } } ; TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration conf=createShortCircuitConf("testPurgingClosedReplicas",sockDir); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4095; final int SEED=0xFADE0; final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),conf); DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); final Semaphore sem=new Semaphore(0); final List 
locatedBlocks=cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE,0,TEST_FILE_LEN).getLocatedBlocks(); final LocatedBlock lblock=locatedBlocks.get(0); final byte[] buf=new byte[TEST_FILE_LEN]; Runnable readerRunnable=new Runnable(){ @Override public void run(){ try { while (true) { BlockReader blockReader=null; try { blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN); sem.release(); try { blockReader.readAll(buf,0,TEST_FILE_LEN); } finally { sem.acquireUninterruptibly(); } } catch ( ClosedByInterruptException e) { LOG.info("got the expected ClosedByInterruptException",e); sem.release(); break; } finally { if (blockReader != null) blockReader.close(); } LOG.info("read another " + TEST_FILE_LEN + " bytes."); } } catch ( Throwable t) { LOG.error("getBlockReader failure",t); testFailed.set(true); sem.release(); } } } ; Thread thread=new Thread(readerRunnable); thread.start(); while (thread.isAlive()) { sem.acquireUninterruptibly(); thread.interrupt(); sem.release(); } Assert.assertFalse(testFailed.get()); BlockReader blockReader=null; try { blockReader=BlockReaderTestUtil.getBlockReader(cluster,lblock,0,TEST_FILE_LEN); blockReader.readFully(buf,0,TEST_FILE_LEN); } finally { if (blockReader != null) blockReader.close(); } byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(buf,expected)); Assert.assertEquals(2,replicasCreated.get()); dfs.close(); cluster.shutdown(); sockDir.close(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Server disables shared memory (interrupt-check interval 0 on the
// *server* conf); after a successful short-circuit read the client's shm
// manager must record this datanode as disabled with no full/notFull
// slots. NOTE(review): "HashMap info" in the visitor appears to have
// lost its generic parameters in formatting -- verify before editing.
/** * Test that a client which supports short-circuit reads using * shared memory can fall back to not using shared memory when * the server doesn't support it. */ @Test public void testShortCircuitReadFromServerWithoutShm() throws Exception { TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration clientConf=createShortCircuitConf("testShortCircuitReadFromServerWithoutShm",sockDir); Configuration serverConf=new Configuration(clientConf); serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0); DFSInputStream.tcpReadsDisabledForTesting=true; final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build(); cluster.waitActive(); clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromServerWithoutShm_clientContext"); final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4000; final int SEED=0xFADEC; DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE)); byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents,expected)); final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache(); final DatanodeInfo datanode=new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId()); cache.getDfsClientShmManager().visit(new Visitor(){ @Override public void visit( HashMap info) throws IOException { Assert.assertEquals(1,info.size()); PerDatanodeVisitorInfo vinfo=info.get(datanode); Assert.assertTrue(vinfo.disabled); Assert.assertEquals(0,vinfo.full.size()); Assert.assertEquals(0,vinfo.notFull.size()); } } ); cluster.shutdown(); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Mirror of the previous test: here the *client* conf disables shared
// memory (interrupt-check interval 0 on clientConf); reads still succeed
// and the client cache ends up with no shm manager at all (assertEquals
// null at the end).
/** * Test that a client which does not support short-circuit reads using * shared memory can talk with a server which supports it. */ @Test public void testShortCircuitReadFromClientWithoutShm() throws Exception { TemporarySocketDirectory sockDir=new TemporarySocketDirectory(); Configuration clientConf=createShortCircuitConf("testShortCircuitReadWithoutShm",sockDir); Configuration serverConf=new Configuration(clientConf); DFSInputStream.tcpReadsDisabledForTesting=true; final MiniDFSCluster cluster=new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build(); cluster.waitActive(); clientConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,0); clientConf.set(DFS_CLIENT_CONTEXT,"testShortCircuitReadFromClientWithoutShm_clientContext"); final DistributedFileSystem fs=(DistributedFileSystem)FileSystem.get(cluster.getURI(0),clientConf); final String TEST_FILE="/test_file"; final int TEST_FILE_LEN=4000; final int SEED=0xFADEC; DFSTestUtil.createFile(fs,new Path(TEST_FILE),TEST_FILE_LEN,(short)1,SEED); byte contents[]=DFSTestUtil.readFileBuffer(fs,new Path(TEST_FILE)); byte expected[]=DFSTestUtil.calculateFileContentsFromSeed(SEED,TEST_FILE_LEN); Assert.assertTrue(Arrays.equals(contents,expected)); final ShortCircuitCache cache=fs.dfs.getClientContext().getShortCircuitCache(); Assert.assertEquals(null,cache.getDfsClientShmManager()); cluster.shutdown(); }

Class: org.apache.hadoop.hdfs.TestBlockReaderLocalLegacy

AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * With both the legacy and the new short-circuit read configurations
 * enabled, a file written to the cluster must read back byte-identical.
 */
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  // Short-circuit reads need domain-socket support on this platform.
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs = cluster.getFileSystem();

  // Write a small file of a known byte pattern.
  Path path = new Path("/foo");
  byte orig[] = new byte[FILE_LENGTH];
  for (int i = 0; i < orig.length; i++) {
    orig[i] = (byte) (i % 10);
  }
  FSDataOutputStream fos = fs.create(path, (short) 1);
  fos.write(orig);
  fos.close();
  DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);

  // Read it back and compare.
  FSDataInputStream fis = cluster.getFileSystem().open(path);
  byte buf[] = new byte[FILE_LENGTH];
  IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
  fis.close();
  Assert.assertArrayEquals(orig, buf);
  // Fix: removed a stray "Arrays.equals(orig, buf);" statement whose
  // result was discarded -- a no-op duplicate of the assertion above.
  cluster.shutdown();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): corrupts every replica, then checks a failed read leaves
// the ByteBuffer untouched in two configurations: a fresh buffer
// (position 0 / limit == capacity) and a buffer with a narrowed window
// (position 3 / limit 25).
/** * Test that, in the case of an error, the position and limit of a ByteBuffer * are left unchanged. This is not mandated by ByteBufferReadable, but clients * of this class might immediately issue a retry on failure, so it's polite. */ @Test public void testStablePositionAfterCorruptRead() throws Exception { final short REPL_FACTOR=1; final long FILE_LENGTH=512L; HdfsConfiguration conf=getConfiguration(null); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); Path path=new Path("/corrupted"); DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L); DFSTestUtil.waitReplication(fs,path,REPL_FACTOR); ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path); int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block); assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted); FSDataInputStream dis=cluster.getFileSystem().open(path); ByteBuffer buf=ByteBuffer.allocateDirect((int)FILE_LENGTH); boolean sawException=false; try { dis.read(buf); } catch ( ChecksumException ex) { sawException=true; } assertTrue(sawException); assertEquals(0,buf.position()); assertEquals(buf.capacity(),buf.limit()); dis=cluster.getFileSystem().open(path); buf.position(3); buf.limit(25); sawException=false; try { dis.read(buf); } catch ( ChecksumException ex) { sawException=true; } assertTrue(sawException); assertEquals(3,buf.position()); assertEquals(25,buf.limit()); cluster.shutdown(); }

Class: org.apache.hadoop.hdfs.TestBlocksScheduledCounter

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * While a block write is in flight, the target datanode reports one
 * scheduled block; once the stream is closed it reports none.
 */
@Test
public void testBlocksScheduledCounter() throws IOException {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();

  // Open a file and push some data so a block gets scheduled.
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i = 0; i < 1024; i++) {
    out.write(i);
  }
  out.hflush();

  ArrayList dnList = new ArrayList();
  final DatanodeManager dm =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  dm.fetchDatanodes(dnList, dnList, false);
  DatanodeDescriptor dn = dnList.get(0);

  assertEquals(1, dn.getBlocksScheduled());
  // Close the file and the counter should go back to zero.
  out.close();
  assertEquals(0, dn.getBlocksScheduled());
}

Class: org.apache.hadoop.hdfs.TestClientProtocolForPipelineRecovery

InternalCallVerifier EqualityVerifier 
// NOTE(review): with a single datanode in the pipeline, the final
// out.close() can only succeed if the restart-recovery path worked; the
// 4s sleep stays under the configured 15s client restart timeout.
/** * Test recovery on restart OOB message. It also tests the delivery of * OOB ack originating from the primary datanode. Since there is only * one node in the cluster, failure of restart-recovery will fail the * test. */ @Test public void testPipelineRecoveryOnOOB() throws Exception { Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,"15"); MiniDFSCluster cluster=null; try { int numDataNodes=1; cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); FileSystem fileSys=cluster.getFileSystem(); Path file=new Path("dataprotocol2.dat"); DFSTestUtil.createFile(fileSys,file,10240L,(short)1,0L); DFSOutputStream out=(DFSOutputStream)(fileSys.append(file).getWrappedStream()); out.write(1); out.hflush(); DFSAdmin dfsadmin=new DFSAdmin(conf); DataNode dn=cluster.getDataNodes().get(0); final String dnAddr=dn.getDatanodeId().getIpcAddr(false); final String[] args1={"-shutdownDatanode",dnAddr,"upgrade"}; Assert.assertEquals(0,dfsadmin.run(args1)); Thread.sleep(4000); cluster.restartDataNode(0,true); out.close(); } finally { if (cluster != null) { cluster.shutdown(); } } }

InternalCallVerifier EqualityVerifier 
/**
 * Test restart timeout: when a pipeline datanode is shut down for upgrade
 * and does not come back within the configured restart timeout, closing
 * the stream must fail with an IOException.
 */
@Test
public void testPipelineRecoveryOnRestartFailure() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
  MiniDFSCluster cluster = null;
  try {
    int numDataNodes = 2;
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();

    Path file = new Path("dataprotocol3.dat");
    DFSTestUtil.createFile(fileSys, file, 10240L, (short) 2, 0L);
    DFSOutputStream out =
        (DFSOutputStream) (fileSys.append(file).getWrappedStream());
    out.write(1);
    out.hflush();

    // Shut down the first pipeline datanode "for upgrade".
    DFSAdmin dfsadmin = new DFSAdmin(conf);
    DataNode dn = cluster.getDataNodes().get(0);
    final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
    final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade"};
    Assert.assertEquals(0, dfsadmin.run(args1));
    Thread.sleep(4000);
    // This close is expected to succeed.
    out.close();

    // Append again, then shut down the second datanode the same way.
    out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
    out.write(1);
    out.hflush();
    dn = cluster.getDataNodes().get(1);
    final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
    final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade"};
    Assert.assertEquals(0, dfsadmin.run(args2));
    Thread.sleep(4000);
    try {
      out.close();
      // Fix: was "assert false;", which is a silent no-op unless the JVM
      // runs with -ea; use an unconditional JUnit failure instead.
      Assert.fail("close() should have thrown an IOException after the "
          + "datanode restart timeout expired");
    } catch (IOException ioe) {
      // Expected
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestConnCache

InternalCallVerifier EqualityVerifier 
// NOTE(review): the very large socket timeout (100000000 ms) keeps the
// cached connection from expiring mid-test; the final assert pins the
// peer cache at exactly one entry, i.e. every pread (including the
// negative-offset and partial-length variants) reused one socket.
/** * Read a file served entirely from one DN. Seek around and read from * different offsets. And verify that they all use the same socket. * @throws Exception */ @Test public void testReadFromOneDN() throws Exception { HdfsConfiguration configuration=new HdfsConfiguration(); final String contextName="testReadFromOneDNContext"; configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,contextName); configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,100000000L); BlockReaderTestUtil util=new BlockReaderTestUtil(1,configuration); final Path testFile=new Path("/testConnCache.dat"); byte authenticData[]=util.writeFile(testFile,FILE_SIZE / 1024); DFSClient client=new DFSClient(new InetSocketAddress("localhost",util.getCluster().getNameNodePort()),util.getConf()); ClientContext cacheContext=ClientContext.get(contextName,client.getConf()); DFSInputStream in=client.open(testFile.toString()); LOG.info("opened " + testFile.toString()); byte[] dataBuf=new byte[BLOCK_SIZE]; pread(in,0,dataBuf,0,dataBuf.length,authenticData); pread(in,FILE_SIZE - dataBuf.length,dataBuf,0,dataBuf.length,authenticData); pread(in,1024,dataBuf,0,dataBuf.length,authenticData); pread(in,-1,dataBuf,0,dataBuf.length,authenticData); pread(in,64,dataBuf,0,dataBuf.length / 2,authenticData); in.close(); client.close(); Assert.assertEquals(1,ClientContext.getFromConf(configuration).getPeerCache().size()); }

Class: org.apache.hadoop.hdfs.TestDFSClientExcludedNodes

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that datanodes excluded by the write pipeline are "forgiven" after
 * the configured cache-expiry interval, so a later write can succeed on
 * nodes that were previously excluded but have since come back.
 */
@Test(timeout = 60000)
public void testExcludedNodesForgiveness() throws IOException {
  // Forgive excluded nodes after 2.5 seconds.
  conf.setLong(DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL, 2500);
  conf.setInt("io.bytes.per.checksum", 512);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  Path filePath = new Path("/testForgivingExcludedNodes");

  byte[] bytes = new byte[256];
  for (int index = 0; index < bytes.length; index++) {
    bytes[index] = '0';
  }

  // Write while all three datanodes are alive.
  FSDataOutputStream out = fs.create(filePath, true, 4096, (short) 3, 512);
  out.write(bytes);
  out.write(bytes);
  out.hflush();

  // Kill two datanodes; the client excludes them on the next flush.
  DataNodeProperties two = cluster.stopDataNode(2);
  DataNodeProperties one = cluster.stopDataNode(1);
  out.write(bytes);
  out.write(bytes);
  out.hflush();

  // Bring them back and wait well past the forgiveness interval.
  Assert.assertTrue(cluster.restartDataNode(one, true));
  Assert.assertTrue(cluster.restartDataNode(two, true));
  cluster.waitActive();
  ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);

  // Kill the only never-excluded node: the remaining write can only succeed
  // if the previously excluded nodes have been forgiven by now.
  cluster.stopDataNode(0);
  try {
    out.write(bytes);
    out.hflush();
    out.close();
  } catch (Exception e) {
    fail("Excluded DataNodes should be forgiven after a while and "
        + "not cause file writing exception of: '" + e.getMessage() + "'");
  }
}

Class: org.apache.hadoop.hdfs.TestDFSClientFailover

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.createFile(fs, TEST_FILE, FILE_LENGTH_TO_VERIFY, (short) 1, 1L);
  // JUnit convention: expected value first, actual second (was reversed,
  // which produces misleading failure messages).
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());

  // Kill the active NN and promote the standby; the client must fail over
  // transparently and still see the file.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(FILE_LENGTH_TO_VERIFY, fs.getFileStatus(TEST_FILE).getLen());

  // A logical URI carrying the default port must also resolve and work.
  Path withPort = new Path("hdfs://" + HATestUtil.getLogicalHostname(cluster)
      + ":" + NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));
  fs.close();
}

Class: org.apache.hadoop.hdfs.TestDFSClientRetries

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test that getAdditionalBlock() and close() are idempotent. This allows * a client to safely retry a call and still produce a correct * file. See HDFS-3031. */ @Test public void testIdempotentAllocateBlockAndClose() throws Exception { final String src="/testIdempotentAllocateBlock"; Path file=new Path(src); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,4096); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); NamenodeProtocols preSpyNN=cluster.getNameNodeRpc(); NamenodeProtocols spyNN=spy(preSpyNN); DFSClient client=new DFSClient(null,spyNN,conf,null); doAnswer(new Answer(){ @Override public LocatedBlock answer( InvocationOnMock invocation) throws Throwable { LocatedBlock ret=(LocatedBlock)invocation.callRealMethod(); LocatedBlocks lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE); int blockCount=lb.getLocatedBlocks().size(); assertEquals(lb.getLastLocatedBlock().getBlock(),ret.getBlock()); LocatedBlock ret2=(LocatedBlock)invocation.callRealMethod(); lb=cluster.getNameNodeRpc().getBlockLocations(src,0,Long.MAX_VALUE); int blockCount2=lb.getLocatedBlocks().size(); assertEquals(lb.getLastLocatedBlock().getBlock(),ret2.getBlock()); assertEquals(blockCount,blockCount2); return ret2; } } ).when(spyNN).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any()); doAnswer(new Answer(){ @Override public Boolean answer( InvocationOnMock invocation) throws Throwable { LOG.info("Called complete(: " + Joiner.on(",").join(invocation.getArguments()) + ")"); if (!(Boolean)invocation.callRealMethod()) { LOG.info("Complete call returned false, not faking a retry RPC"); return false; } try { boolean ret=(Boolean)invocation.callRealMethod(); LOG.info("Complete call returned true, faked second RPC. 
" + "Returned: " + ret); return ret; } catch ( Throwable t) { LOG.error("Idempotent retry threw exception",t); throw t; } } } ).when(spyNN).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong()); OutputStream stm=client.create(file.toString(),true); try { AppendTestUtil.write(stm,0,10000); stm.close(); stm=null; } finally { IOUtils.cleanup(LOG,stm); } Mockito.verify(spyNN,Mockito.atLeastOnce()).addBlock(Mockito.anyString(),Mockito.anyString(),Mockito.any(),Mockito.any(),Mockito.anyLong(),Mockito.any()); Mockito.verify(spyNN,Mockito.atLeastOnce()).complete(Mockito.anyString(),Mockito.anyString(),Mockito.any(),anyLong()); AppendTestUtil.check(fs,file,10000); } finally { cluster.shutdown(); } }

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Corrupts every replica of the file's first block, then reads the file
// twice through the same DFSInputStream, expecting a checksum error each
// time — corruption state must not be carried over between read calls.
// NOTE(review): the catch is the broad `Exception` and relies on message
// matching ("Checksum error") rather than catching ChecksumException.
/** * Test that checksum failures are recovered from by the next read on the same * DFSInputStream. Corruption information is not persisted from read call to * read call, so the client should expect consecutive calls to behave the same * way. See HDFS-3067. */ @Test public void testRetryOnChecksumFailure() throws Exception { HdfsConfiguration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { final short REPL_FACTOR=1; final long FILE_LENGTH=512L; cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); Path path=new Path("/corrupted"); DFSTestUtil.createFile(fs,path,FILE_LENGTH,REPL_FACTOR,12345L); DFSTestUtil.waitReplication(fs,path,REPL_FACTOR); ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,path); int blockFilesCorrupted=cluster.corruptBlockOnDataNodes(block); assertEquals("All replicas not corrupted",REPL_FACTOR,blockFilesCorrupted); InetSocketAddress nnAddr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(nnAddr,conf); DFSInputStream dis=client.open(path.toString()); byte[] arr=new byte[(int)FILE_LENGTH]; for (int i=0; i < 2; ++i) { try { dis.read(arr,0,(int)FILE_LENGTH); fail("Expected ChecksumException not thrown"); } catch ( Exception ex) { GenericTestUtils.assertExceptionContains("Checksum error",ex); } } } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Computes a file checksum, stops the datanode hosting the first replica of
// the first block, and verifies the recomputed checksum (served by the
// remaining replicas) is identical.
// NOTE(review): `assertTrue(cs1 != null)` would read better as
// assertNotNull, and `List locatedblocks` is a raw type.
@Test public void testGetFileChecksum() throws Exception { final String f="/testGetFileChecksum"; final Path p=new Path(f); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); try { cluster.waitActive(); final FileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,p,1L << 20,(short)3,20100402L); final FileChecksum cs1=fs.getFileChecksum(p); assertTrue(cs1 != null); final List locatedblocks=DFSClient.callGetBlockLocations(cluster.getNameNodeRpc(),f,0,Long.MAX_VALUE).getLocatedBlocks(); final DatanodeInfo first=locatedblocks.get(0).getLocations()[0]; cluster.stopDataNode(first.getXferAddr()); final FileChecksum cs2=fs.getFileChecksum(p); assertEquals(cs1,cs2); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestDFSOutputStream

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The close() method of DFSOutputStream should never throw the same exception
 * twice. See HDFS-5335 for details.
 */
@Test
public void testCloseTwice() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  FSDataOutputStream os = fs.create(new Path("/test"));
  DFSOutputStream dos =
      (DFSOutputStream) Whitebox.getInternalState(os, "wrappedStream");
  @SuppressWarnings("unchecked")
  AtomicReference ex =
      (AtomicReference) Whitebox.getInternalState(dos, "lastException");
  Assert.assertEquals(null, ex.get());

  dos.close();

  // Inject a pending exception; the next close() must surface it.
  IOException dummy = new IOException("dummy");
  ex.set(dummy);
  try {
    dos.close();
    // Was missing: without this the test passed vacuously when close()
    // failed to throw the injected exception at all.
    Assert.fail("close() should have thrown the injected exception");
  } catch (IOException e) {
    // Expected first, actual second (was reversed).
    Assert.assertEquals(dummy, e);
  }
  // The stored exception must be cleared after being thrown once...
  Assert.assertEquals(null, ex.get());
  // ...so a further close() does not rethrow it.
  dos.close();
}

Class: org.apache.hadoop.hdfs.TestDFSRemove

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Creates 100 files, deletes them all, waits three heartbeat intervals for
// the datanodes to report deleted blocks, and asserts total DFS usage
// returns to its starting value (i.e. all blocks were physically removed).
@Test public void testRemove() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { FileSystem fs=cluster.getFileSystem(); assertTrue(fs.mkdirs(dir)); long dfsUsedStart=getTotalDfsUsed(cluster); { final int fileCount=100; for (int i=0; i < fileCount; i++) { Path a=new Path(dir,"a" + i); createFile(fs,a); } long dfsUsedMax=getTotalDfsUsed(cluster); for (int i=0; i < fileCount; i++) { Path a=new Path(dir,"a" + i); fs.delete(a,false); } Thread.sleep(3 * DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000); long dfsUsedFinal=getTotalDfsUsed(cluster); assertEquals("All blocks should be gone. start=" + dfsUsedStart + " max="+ dfsUsedMax+ " final="+ dfsUsedFinal,dfsUsedStart,dfsUsedFinal); } fs.delete(dir,true); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestDFSRename

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises rename() semantics: renaming an unrelated file does not disturb
// the lease on a file being written; rename to a path under a nonexistent
// parent fails; renaming a directory into its own subtree fails; renaming
// onto a name that is a prefix of another succeeds; self-rename and
// trailing-slash variants behave as asserted. Statement order matters
// (lease counts are checked between writes), so the logic is left intact.
@Test public void testRename() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { FileSystem fs=cluster.getFileSystem(); assertTrue(fs.mkdirs(dir)); { Path a=new Path(dir,"a"); Path aa=new Path(dir,"aa"); Path b=new Path(dir,"b"); createFile(fs,a); assertEquals(0,countLease(cluster)); DataOutputStream aa_out=fs.create(aa); aa_out.writeBytes("something"); assertEquals(1,countLease(cluster)); list(fs,"rename0"); fs.rename(a,b); list(fs,"rename1"); aa_out.writeBytes(" more"); aa_out.close(); list(fs,"rename2"); assertEquals(0,countLease(cluster)); } { Path dstPath=new Path("/c/d"); assertFalse(fs.exists(dstPath)); assertFalse(fs.rename(dir,dstPath)); } { Path src=new Path("/a/b"); Path dst=new Path("/a/b/c"); createFile(fs,new Path(src,"foo")); assertFalse(fs.rename(src,dst)); assertFalse(fs.rename(src.getParent(),dst.getParent())); } { Path src=new Path("/testPrefix"); Path dst=new Path("/testPrefixfile"); createFile(fs,src); assertTrue(fs.rename(src,dst)); } { Path src=new Path("/a/b/c"); createFile(fs,src); assertTrue(fs.rename(src,src)); assertFalse(fs.rename(new Path("/a/b"),new Path("/a/b/"))); assertTrue(fs.rename(src,new Path("/a/b/c/"))); } fs.delete(dir,true); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestDFSShell

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies `hadoop fs -get` against block corruption: a clean get (with and
// without -ignoreCrc) returns the original content; after corrupting the
// block files and restarting the cluster, a plain get fails (exit 1) while
// -ignoreCrc succeeds and returns the corrupted bytes (first byte flipped
// by corrupt(), the rest intact).
// NOTE(review): `assertTrue(StringUtils.stringifyException(e),false)` is an
// old idiom for fail(message); behavior is identical.
@Test(timeout=30000) public void testGet() throws IOException { DFSTestUtil.setLogLevel2All(FSInputChecker.LOG); final String fname="testGet.txt"; Path root=new Path("/test/get"); final Path remotef=new Path(root,fname); final Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10); TestGetRunner runner=new TestGetRunner(){ private int count=0; private final FsShell shell=new FsShell(conf); public String run( int exitcode, String... options) throws IOException { String dst=new File(TEST_ROOT_DIR,fname + ++count).getAbsolutePath(); String[] args=new String[options.length + 3]; args[0]="-get"; args[args.length - 2]=remotef.toString(); args[args.length - 1]=dst; for (int i=0; i < options.length; i++) { args[i + 1]=options[i]; } show("args=" + Arrays.asList(args)); try { assertEquals(exitcode,shell.run(args)); } catch ( Exception e) { assertTrue(StringUtils.stringifyException(e),false); } return exitcode == 0 ? DFSTestUtil.readFile(new File(dst)) : null; } } ; File localf=createLocalFile(new File(TEST_ROOT_DIR,fname)); MiniDFSCluster cluster=null; DistributedFileSystem dfs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build(); dfs=cluster.getFileSystem(); mkdir(dfs,root); dfs.copyFromLocalFile(false,false,new Path(localf.getPath()),remotef); String localfcontent=DFSTestUtil.readFile(localf); assertEquals(localfcontent,runner.run(0)); assertEquals(localfcontent,runner.run(0,"-ignoreCrc")); List files=getBlockFiles(cluster); dfs.close(); cluster.shutdown(); show("files=" + files); corrupt(files); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).format(false).build(); dfs=cluster.getFileSystem(); assertEquals(null,runner.run(1)); String corruptedcontent=runner.run(0,"-ignoreCrc"); assertEquals(localfcontent.substring(1),corruptedcontent.substring(1)); assertEquals(localfcontent.charAt(0) + 1,corruptedcontent.charAt(0)); } finally { if (null != dfs) { try { dfs.close(); } catch ( 
Exception e) { } } if (null != cluster) { cluster.shutdown(); } localf.delete(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Permission matrix for user-namespace xattrs across three principals
// (owner user1, unrelated user2, superuser): with mode 707 both users can
// set/get/remove; after chmod 700 user2's set/remove fail with
// "Permission denied" (exit 1); trusted-namespace xattrs require superuser.
// NOTE(review): one user1.doAs(...) block below is an empty no-op — the
// javadoc's "As user1: Read the xattr / Remove the xattr" steps appear to
// have been lost from it; confirm against upstream TestDFSShell.
// (Javadoc typo "directorries" -> "directories" left in place to keep the
// original lines byte-identical.)
/** * Test to make sure that user namespace xattrs can be set only if path has * access and for sticky directorries, only owner/privileged user can write. * Trusted namespace xattrs can be set only with privileged users. * As user1: Create a directory (/foo) as user1, chown it to user1 (and * user1's group), grant rwx to "other". * As user2: Set an xattr (should pass with path access). * As user1: Set an xattr (should pass). * As user2: Read the xattr (should pass). Remove the xattr (should pass with * path access). * As user1: Read the xattr (should pass). Remove the xattr (should pass). * As user1: Change permissions only to owner * As User2: Set an Xattr (Should fail set with no path access) Remove an * Xattr (Should fail with no path access) * As SuperUser: Set an Xattr with Trusted (Should pass) */ @Test(timeout=30000) public void testSetXAttrPermissionAsDifferentOwner() throws Exception { final String USER1="user1"; final String GROUP1="supergroup"; final UserGroupInformation user1=UserGroupInformation.createUserForTesting(USER1,new String[]{GROUP1}); final UserGroupInformation user2=UserGroupInformation.createUserForTesting("user2",new String[]{"mygroup2"}); final UserGroupInformation SUPERUSER=UserGroupInformation.getCurrentUser(); MiniDFSCluster cluster=null; PrintStream bak=null; try { final Configuration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final FileSystem fs=cluster.getFileSystem(); fs.setOwner(new Path("/"),USER1,GROUP1); bak=System.err; final FsShell fshell=new FsShell(conf); final ByteArrayOutputStream out=new ByteArrayOutputStream(); System.setErr(new PrintStream(out)); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-mkdir","/foo"}); assertEquals("Return should be 0",0,ret); out.reset(); return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object 
run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-chmod","707","/foo"}); assertEquals("Return should be 0",0,ret); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final int ret=ToolRunner.run(fshell,new String[]{"-chmod","700","/foo"}); assertEquals("Return should be 0",0,ret); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a2","/foo"}); assertEquals("Returned should be 1",1,ret); final String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a2","/foo"}); 
assertEquals("Returned should be 1",1,ret); final String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); SUPERUSER.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","trusted.a3","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); return null; } } ); } finally { if (bak != null) { System.setErr(bak); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Runs FsShell commands (-ls, -rmr, -du, -put, -cp, -cat, -chgrp, -chown)
// against a *remote* cluster via fully-qualified hdfs:// URIs while the
// shell's default fs is a different (source) cluster, then verifies that
// bare "hdfs:///" paths resolve against the shell's default fs. Exit code 0
// is asserted for every command; ownership changes are checked with
// confirmOwner.
@Test(timeout=30000) public void testURIPaths() throws Exception { Configuration srcConf=new HdfsConfiguration(); Configuration dstConf=new HdfsConfiguration(); MiniDFSCluster srcCluster=null; MiniDFSCluster dstCluster=null; File bak=new File(PathUtils.getTestDir(getClass()),"dfs_tmp_uri"); bak.mkdirs(); try { srcCluster=new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build(); dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,bak.getAbsolutePath()); dstCluster=new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build(); FileSystem srcFs=srcCluster.getFileSystem(); FileSystem dstFs=dstCluster.getFileSystem(); FsShell shell=new FsShell(); shell.setConf(srcConf); String[] argv=new String[2]; argv[0]="-ls"; argv[1]=dstFs.getUri().toString() + "/"; int ret=ToolRunner.run(shell,argv); assertEquals("ls works on remote uri ",0,ret); dstFs.mkdirs(new Path("/hadoopdir")); argv=new String[2]; argv[0]="-rmr"; argv[1]=dstFs.getUri().toString() + "/hadoopdir"; ret=ToolRunner.run(shell,argv); assertEquals("-rmr works on remote uri " + argv[1],0,ret); argv[0]="-du"; argv[1]=dstFs.getUri().toString() + "/"; ret=ToolRunner.run(shell,argv); assertEquals("du works on remote uri ",0,ret); File furi=new File(TEST_ROOT_DIR,"furi"); createLocalFile(furi); argv=new String[3]; argv[0]="-put"; argv[1]=furi.toURI().toString(); argv[2]=dstFs.getUri().toString() + "/furi"; ret=ToolRunner.run(shell,argv); assertEquals(" put is working ",0,ret); argv[0]="-cp"; argv[1]=dstFs.getUri().toString() + "/furi"; argv[2]=srcFs.getUri().toString() + "/furi"; ret=ToolRunner.run(shell,argv); assertEquals(" cp is working ",0,ret); assertTrue(srcFs.exists(new Path("/furi"))); argv=new String[2]; argv[0]="-cat"; argv[1]=dstFs.getUri().toString() + "/furi"; ret=ToolRunner.run(shell,argv); assertEquals(" cat is working ",0,ret); dstFs.delete(new Path("/furi"),true); dstFs.delete(new Path("/hadoopdir"),true); String file="/tmp/chownTest"; Path path=new Path(file); Path parent=new Path("/tmp"); Path root=new 
Path("/"); TestDFSShell.writeFile(dstFs,path); runCmd(shell,"-chgrp","-R","herbivores",dstFs.getUri().toString() + "/*"); confirmOwner(null,"herbivores",dstFs,parent,path); runCmd(shell,"-chown","-R",":reptiles",dstFs.getUri().toString() + "/"); confirmOwner(null,"reptiles",dstFs,root,parent,path); argv[0]="-cat"; argv[1]="hdfs:///furi"; ret=ToolRunner.run(shell,argv); assertEquals(" default works for cat",0,ret); argv[0]="-ls"; argv[1]="hdfs:///"; ret=ToolRunner.run(shell,argv); assertEquals("default works for ls ",0,ret); argv[0]="-rmr"; argv[1]="hdfs:///furi"; ret=ToolRunner.run(shell,argv); assertEquals("default works for rm/rmr",0,ret); } finally { if (null != srcCluster) { srcCluster.shutdown(); } if (null != dstCluster) { dstCluster.shutdown(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Broad matrix test of DFSShell commands: -rm (glob and nonexistent), -cat
// (existing and missing files), -test with -e/-z/-d/-f/-s flags, -touchz
// (including into a nonexistent parent, exit 1), -mkdir, and -cp (into own
// subtree fails with 1; sibling copy succeeds). Each shell.run() is wrapped
// in its own try/catch and the exit code asserted; the repetitive blocks
// are intentionally independent of each other.
/** * Tests various options of DFSShell. */ @Test(timeout=120000) public void testDFSShell() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); DistributedFileSystem fileSys=(DistributedFileSystem)fs; FsShell shell=new FsShell(); shell.setConf(conf); try { Path myPath=new Path("/test/mkdirs"); assertTrue(fileSys.mkdirs(myPath)); assertTrue(fileSys.exists(myPath)); assertTrue(fileSys.mkdirs(myPath)); Path myFile=new Path("/test/mkdirs/myFile"); writeFile(fileSys,myFile); assertTrue(fileSys.exists(myFile)); Path myFile2=new Path("/test/mkdirs/myFile2"); writeFile(fileSys,myFile2); assertTrue(fileSys.exists(myFile2)); { String[] args=new String[2]; args[0]="-rm"; args[1]="/test/mkdirs/myFile*"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val == 0); assertFalse(fileSys.exists(myFile)); assertFalse(fileSys.exists(myFile2)); writeFile(fileSys,myFile); assertTrue(fileSys.exists(myFile)); writeFile(fileSys,myFile2); assertTrue(fileSys.exists(myFile2)); } { String[] args=new String[3]; args[0]="-cat"; args[1]="/test/mkdirs/myFile"; args[2]="/test/mkdirs/myFile2"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run: " + StringUtils.stringifyException(e)); } assertTrue(val == 0); } fileSys.delete(myFile2,true); { String[] args=new String[2]; args[0]="-cat"; args[1]="/test/mkdirs/myFile1"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val != 0); } { String[] args=new String[2]; args[0]="-rm"; args[1]="/test/mkdirs/myFile1"; int val=-1; try { val=shell.run(args); } catch ( 
Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val != 0); } { String[] args=new String[2]; args[0]="-rm"; args[1]="/test/mkdirs/myFile"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertTrue(val == 0); } { String[] args; int val; args=new String[3]; args[0]="-test"; args[1]="-e"; args[2]="/test/mkdirs/noFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args[1]="-z"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args=new String[2]; args[0]="-touchz"; args[1]="/test/mkdirs/isFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); args=new String[2]; args[0]="-touchz"; args[1]="/test/mkdirs/thisDirNotExists/isFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args=new String[3]; args[0]="-test"; args[1]="-e"; args[2]="/test/mkdirs/isFileHere"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); args[1]="-d"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args[1]="-z"; val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } { String[] args=new String[2]; 
args[0]="-mkdir"; args[1]="/test/dir1"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); String[] args1=new String[3]; args1[0]="-cp"; args1[1]="/test/dir1"; args1[2]="/test/dir1/dir2"; val=0; try { val=shell.run(args1); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); args1[0]="-cp"; args1[1]="/test/dir1"; args1[2]="/test/dir1foo"; val=-1; try { val=shell.run(args1); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-f"; args[2]="/test/mkdirs/noFileHere"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-f"; args[2]="/test/mkdirs"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { writeFile(fileSys,myFile); assertTrue(fileSys.exists(myFile)); String[] args=new String[3]; args[0]="-test"; args[1]="-f"; args[2]=myFile.toString(); int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-s"; args[2]="/test/mkdirs/noFileHere"; int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-s"; args[2]="/test/mkdirs/isFileHere"; int val=-1; try { val=shell.run(args); } catch ( Exception e) 
{ System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(1,val); } { String[] args=new String[3]; args[0]="-test"; args[1]="-s"; args[2]=myFile.toString(); int val=-1; try { val=shell.run(args); } catch ( Exception e) { System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage()); } assertEquals(0,val); } } finally { try { fileSys.close(); } catch ( Exception e) { } cluster.shutdown(); } }

InternalCallVerifier EqualityVerifier 
/**
 * The default fs setting is file:// which is not a DFS, so DFSAdmin should
 * throw and catch InvalidArgumentException internally and return the -1
 * exit code rather than propagating the exception.
 */
@Test(timeout = 30000)
public void testInvalidShell() throws Exception {
  Configuration conf = new Configuration();
  DFSAdmin admin = new DFSAdmin();
  admin.setConf(conf);
  int res = admin.run(new String[]{"-refreshNodes"});
  // JUnit convention: expected value (-1) first, actual result second
  // (the original passed them in the reverse order).
  assertEquals("expected to fail -1", -1, res);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies xattr operations respect path permissions: with /foo at 0700 a
// non-owner's -setfattr fails (exit 1, "Permission denied" on stderr) while
// the owner's succeeds; with 0750 (no write for group) the non-owner's
// -getfattr and -setfattr -x both fail the same way. System.err is captured
// into a ByteArrayOutputStream and restored in the finally block.
@Test(timeout=30000) public void testSetXAttrPermission() throws Exception { UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); MiniDFSCluster cluster=null; PrintStream bak=null; try { final Configuration conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); Path p=new Path("/foo"); fs.mkdirs(p); bak=System.err; final FsShell fshell=new FsShell(conf); final ByteArrayOutputStream out=new ByteArrayOutputStream(); System.setErr(new PrintStream(out)); fs.setPermission(p,new FsPermission((short)0700)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 1",1,ret); String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); int ret=ToolRunner.run(fshell,new String[]{"-setfattr","-n","user.a1","-v","1234","/foo"}); assertEquals("Returned should be 0",0,ret); out.reset(); fs.setPermission(p,new FsPermission((short)0750)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { int ret=ToolRunner.run(fshell,new String[]{"-getfattr","-n","user.a1","/foo"}); assertEquals("Returned should be 1",1,ret); String str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); ret=ToolRunner.run(fshell,new String[]{"-setfattr","-x","user.a1","/foo"}); assertEquals("Returned should be 1",1,ret); str=out.toString(); assertTrue("Permission denied printed",str.indexOf("Permission denied") != -1); out.reset(); return null; } } ); } finally { if (bak != null) { System.setErr(bak); } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Checks `fs -count` output (directory/file counts) over a created HDFS
// tree, its subpaths, a local-filesystem directory, and finally a mixed
// invocation counting an HDFS path and a local path in one command.
@Test(timeout=30000) public void testCount() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); DistributedFileSystem dfs=cluster.getFileSystem(); FsShell shell=new FsShell(); shell.setConf(conf); try { String root=createTree(dfs,"count"); runCount(root,2,4,shell); runCount(root + "2",2,1,shell); runCount(root + "2/f1",0,1,shell); runCount(root + "2/sub",1,0,shell); final FileSystem localfs=FileSystem.getLocal(conf); Path localpath=new Path(TEST_ROOT_DIR,"testcount"); localpath=localpath.makeQualified(localfs.getUri(),localfs.getWorkingDirectory()); localfs.mkdirs(localpath); final String localstr=localpath.toString(); System.out.println("localstr=" + localstr); runCount(localstr,1,0,shell); assertEquals(0,runCmd(shell,"-count",root,localstr)); } finally { try { dfs.close(); } catch ( Exception e) { } cluster.shutdown(); } }

APIUtilityVerifier EqualityVerifier 
// For each of -put, -copyFromLocal and -cp: copying onto an existing
// destination file succeeds with the -f (force) flag (SUCCESS) and fails
// without it (ERROR). Local fixture file and HDFS test dir are cleaned up
// in the finally block.
@Test(timeout=30000) public void testCopyCommandsWithForceOption() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); FsShell shell=null; FileSystem fs=null; final File localFile=new File(TEST_ROOT_DIR,"testFileForPut"); final String localfilepath=new Path(localFile.getAbsolutePath()).toUri().toString(); final String testdir="/tmp/TestDFSShell-testCopyCommandsWithForceOption-" + counter.getAndIncrement(); final Path hdfsTestDir=new Path(testdir); try { fs=cluster.getFileSystem(); fs.mkdirs(hdfsTestDir); localFile.createNewFile(); writeFile(fs,new Path(testdir,"testFileForPut")); shell=new FsShell(); String[] argv=new String[]{"-put","-f",localfilepath,testdir}; int res=ToolRunner.run(shell,argv); assertEquals("put -f is not working",SUCCESS,res); argv=new String[]{"-put",localfilepath,testdir}; res=ToolRunner.run(shell,argv); assertEquals("put command itself is able to overwrite the file",ERROR,res); argv=new String[]{"-copyFromLocal","-f",localfilepath,testdir}; res=ToolRunner.run(shell,argv); assertEquals("copyFromLocal -f is not working",SUCCESS,res); argv=new String[]{"-copyFromLocal",localfilepath,testdir}; res=ToolRunner.run(shell,argv); assertEquals("copyFromLocal command itself is able to overwrite the file",ERROR,res); argv=new String[]{"-cp","-f",localfilepath,testdir}; res=ToolRunner.run(shell,argv); assertEquals("cp -f is not working",SUCCESS,res); argv=new String[]{"-cp",localfilepath,testdir}; res=ToolRunner.run(shell,argv); assertEquals("cp command itself is able to overwrite the file",ERROR,res); } finally { if (null != shell) shell.close(); if (localFile.exists()) localFile.delete(); if (null != fs) { fs.delete(hdfsTestDir,true); fs.close(); } cluster.shutdown(); } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs {@code -ls} as a non-privileged user against a directory with 0700
 * permissions and checks that the shell exits with 1 and renders the
 * RemoteException as a readable "Permission denied" message on stderr.
 */
@Test(timeout=30000)
public void testRemoteException() throws Exception {
  final UserGroupInformation tmpUGI =
      UserGroupInformation.createUserForTesting("tmpname", new String[]{"mygroup"});
  MiniDFSCluster dfs = null;
  PrintStream bak = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = dfs.getFileSystem();
    Path p = new Path("/foo");
    fs.mkdirs(p);
    // Owner-only permissions, so tmpUGI is denied access below.
    fs.setPermission(p, new FsPermission((short) 0700));
    bak = System.err;
    // Parameterized (was a raw PrivilegedExceptionAction) to avoid the
    // unchecked-conversion warning; behavior is identical.
    tmpUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        FsShell fshell = new FsShell(conf);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        PrintStream tmp = new PrintStream(out);
        // Capture stderr so the error text can be asserted on.
        System.setErr(tmp);
        String[] args = new String[2];
        args[0] = "-ls";
        args[1] = "/foo";
        int ret = ToolRunner.run(fshell, args);
        assertEquals("returned should be 1", 1, ret);
        String str = out.toString();
        assertTrue("permission denied printed", str.indexOf("Permission denied") != -1);
        out.reset();
        return null;
      }
    });
  } finally {
    // Always restore the real stderr and tear the cluster down.
    if (bak != null) {
      System.setErr(bak);
    }
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a zero-length file round-trips through HDFS:
 * copyFromLocalFile and copyToLocalFile must both preserve the empty file.
 */
@Test(timeout=30000)
public void testZeroSizeFile() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  final DistributedFileSystem dfs = (DistributedFileSystem) fs;
  try {
    // Create an empty local source file.
    final File localSrc = new File(TEST_ROOT_DIR, "f1");
    assertFalse(localSrc.exists());
    assertTrue(localSrc.createNewFile());
    assertTrue(localSrc.exists());
    assertTrue(localSrc.isFile());
    assertEquals(0L, localSrc.length());
    // Push it into HDFS.
    final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
    final Path remotef = new Path(root, "dst");
    show("copy local " + localSrc + " to remote " + remotef);
    dfs.copyFromLocalFile(false, false, new Path(localSrc.getPath()), remotef);
    show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
    // Pull it back out and confirm it is still empty.
    final File localDst = new File(TEST_ROOT_DIR, "f2");
    assertFalse(localDst.exists());
    dfs.copyToLocalFile(remotef, new Path(localDst.getPath()));
    assertTrue(localDst.exists());
    assertTrue(localDst.isFile());
    assertEquals(0L, localDst.length());
    localSrc.delete();
    localDst.delete();
  } finally {
    try {
      dfs.close();
    } catch (Exception ignored) {
      // best-effort close before cluster shutdown
    }
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Check command error outputs and exit statuses: runs a sequence of shell
 * commands against missing paths (and a few valid ones) with stderr
 * redirected into a buffer, asserting on both the exit code and the exact
 * error text. The statement order is significant: later commands depend on
 * state (directories/files) created by earlier ones.
 */
@Test(timeout=30000) public void testErrOutPut() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  PrintStream bak=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem srcFs=cluster.getFileSystem();
    Path root=new Path("/nonexistentfile");
    // Capture stderr so each command's error text can be asserted on.
    bak=System.err;
    ByteArrayOutputStream out=new ByteArrayOutputStream();
    PrintStream tmp=new PrintStream(out);
    System.setErr(tmp);
    // -cat on a missing file: exit 1 and no raw exception dump.
    String[] argv=new String[2];
    argv[0]="-cat";
    argv[1]=root.toUri().getPath();
    int ret=ToolRunner.run(new FsShell(),argv);
    assertEquals(" -cat returned 1 ",1,ret);
    String returned=out.toString();
    assertTrue("cat does not print exceptions ",(returned.lastIndexOf("Exception") == -1));
    out.reset();
    // -rm / -rmr on a missing file: exit 1 with a unix-style message.
    argv[0]="-rm";
    argv[1]=root.toString();
    FsShell shell=new FsShell();
    shell.setConf(conf);
    ret=ToolRunner.run(shell,argv);
    assertEquals(" -rm returned 1 ",1,ret);
    returned=out.toString();
    out.reset();
    assertTrue("rm prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
    argv[0]="-rmr";
    argv[1]=root.toString();
    ret=ToolRunner.run(shell,argv);
    assertEquals(" -rmr returned 1",1,ret);
    returned=out.toString();
    assertTrue("rmr prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
    out.reset();
    // -du / -dus on a missing file: only the message is asserted here.
    // NOTE(review): the exit codes of these two runs are never checked.
    argv[0]="-du";
    argv[1]="/nonexistentfile";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertTrue(" -du prints reasonable error ",(returned.lastIndexOf("No such file or directory") != -1));
    out.reset();
    argv[0]="-dus";
    argv[1]="/nonexistentfile";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertTrue(" -dus prints reasonable error",(returned.lastIndexOf("No such file or directory") != -1));
    out.reset();
    // -ls on a missing file must not claim "Found 0 items".
    argv[0]="-ls";
    argv[1]="/nonexistenfile";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertTrue(" -ls does not return Found 0 items",(returned.lastIndexOf("Found 0") == -1));
    out.reset();
    argv[0]="-ls";
    argv[1]="/nonexistentfile";
    ret=ToolRunner.run(shell,argv);
    // NOTE(review): the assertion message says "-lsr" but the command run
    // above is "-ls" — the message is misleading, the check itself is fine.
    assertEquals(" -lsr should fail ",1,ret);
    out.reset();
    // -ls on a real, empty directory prints nothing ("Found 0" suppressed).
    srcFs.mkdirs(new Path("/testdir"));
    argv[0]="-ls";
    argv[1]="/testdir";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertTrue(" -ls does not print out anything ",(returned.lastIndexOf("Found 0") == -1));
    out.reset();
    // -ls on a glob that matches nothing: exit 1.
    argv[0]="-ls";
    argv[1]="/user/nonxistant/*";
    ret=ToolRunner.run(shell,argv);
    assertEquals(" -ls on nonexistent glob returns 1",1,ret);
    out.reset();
    // -mkdir on an existing directory: exit 1 with "File exists".
    argv[0]="-mkdir";
    argv[1]="/testdir";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertEquals(" -mkdir returned 1 ",1,ret);
    assertTrue(" -mkdir returned File exists",(returned.lastIndexOf("File exists") != -1));
    // Create a real file so -mkdir over it can be tested next.
    Path testFile=new Path("/testfile");
    OutputStream outtmp=srcFs.create(testFile);
    outtmp.write(testFile.toString().getBytes());
    outtmp.close();
    out.reset();
    // -mkdir over an existing file: exit 1 with "not a directory".
    argv[0]="-mkdir";
    argv[1]="/testfile";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertEquals(" -mkdir returned 1",1,ret);
    assertTrue(" -mkdir returned this is a file ",(returned.lastIndexOf("not a directory") != -1));
    out.reset();
    // -mv to a relative destination: expected to fail with exit 1.
    argv=new String[3];
    argv[0]="-mv";
    argv[1]="/testfile";
    argv[2]="file";
    ret=ToolRunner.run(shell,argv);
    assertEquals("mv failed to rename",1,ret);
    out.reset();
    // A successful -mv prints nothing ("Renamed" must not appear).
    argv=new String[3];
    argv[0]="-mv";
    argv[1]="/testfile";
    argv[2]="/testfiletest";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertTrue("no output from rename",(returned.lastIndexOf("Renamed") == -1));
    out.reset();
    // -mv of the now-moved (missing) source: unix-style error message.
    argv[0]="-mv";
    argv[1]="/testfile";
    argv[2]="/testfiletmp";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertTrue(" unix like output",(returned.lastIndexOf("No such file or") != -1));
    out.reset();
    // -du with no path defaults to the home directory and succeeds.
    argv=new String[1];
    argv[0]="-du";
    srcFs.mkdirs(srcFs.getHomeDirectory());
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertEquals(" no error ",0,ret);
    assertTrue("empty path specified",(returned.lastIndexOf("empty string") == -1));
    out.reset();
    // -test -d on a missing dir: exit 1, and silent (no stderr output).
    argv=new String[3];
    argv[0]="-test";
    argv[1]="-d";
    argv[2]="/no/such/dir";
    ret=ToolRunner.run(shell,argv);
    returned=out.toString();
    assertEquals(" -test -d wrong result ",1,ret);
    assertTrue(returned.isEmpty());
  } finally {
    // Restore the real stderr and tear the cluster down.
    if (bak != null) {
      System.setErr(bak);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests {@code -copyToLocal}: a glob copy of a whole remote tree must
 * materialize every file and directory locally, and a copy of a
 * nonexistent source must fail without creating a local file.
 */
@Test(timeout=30000) public void testCopyToLocal() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs=cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem);
  DistributedFileSystem dfs=(DistributedFileSystem)fs;
  FsShell shell=new FsShell();
  shell.setConf(conf);
  try {
    // Fixture tree on HDFS: createTree builds f1, f2, sub/f3, sub/f4
    // under "copyToLocal" plus a sibling "copyToLocal2" tree.
    String root=createTree(dfs,"copyToLocal");
    {
      // Glob copy of everything matching root* into TEST_ROOT_DIR.
      try {
        assertEquals(0,runCmd(shell,"-copyToLocal",root + "*",TEST_ROOT_DIR));
      } catch ( Exception e) {
        System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
      }
      File localroot=new File(TEST_ROOT_DIR,"copyToLocal");
      File localroot2=new File(TEST_ROOT_DIR,"copyToLocal2");
      // Every file/dir of the remote tree must now exist locally.
      File f1=new File(localroot,"f1");
      assertTrue("Copying failed.",f1.isFile());
      File f2=new File(localroot,"f2");
      assertTrue("Copying failed.",f2.isFile());
      File sub=new File(localroot,"sub");
      assertTrue("Copying failed.",sub.isDirectory());
      File f3=new File(sub,"f3");
      assertTrue("Copying failed.",f3.isFile());
      File f4=new File(sub,"f4");
      assertTrue("Copying failed.",f4.isFile());
      File f5=new File(localroot2,"f1");
      assertTrue("Copying failed.",f5.isFile());
      // Clean up the local copies.
      f1.delete();
      f2.delete();
      f3.delete();
      f4.delete();
      f5.delete();
      sub.delete();
    }
    {
      // Copying a nonexistent remote file must fail (exit 1) and must not
      // leave a local artifact behind.
      String[] args={"-copyToLocal","nosuchfile",TEST_ROOT_DIR};
      try {
        assertEquals(1,shell.run(args));
      } catch ( Exception e) {
        System.err.println("Exception raised from DFSShell.run " + e.getLocalizedMessage());
      }
      File f6=new File(TEST_ROOT_DIR,"nosuchfile");
      assertTrue(!f6.exists());
    }
  } finally {
    try {
      dfs.close();
    } catch ( Exception e) {
      // best-effort close; cluster shutdown below is what matters
    }
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests {@code -cp} preserve flags on a DIRECTORY source: plain -p keeps
 * timestamps/owner/group/permission but drops xattrs and ACLs; -ptopx adds
 * xattrs; -ptopa and -ptoa add ACLs. Each targetDirN section below checks
 * one flag combination against the same source directory.
 */
@Test(timeout=120000) public void testCopyCommandsToDirectoryWithPreserveOption() throws Exception {
  Configuration conf=new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell=null;
  FileSystem fs=null;
  final String testdir="/tmp/TestDFSShell-testCopyCommandsToDirectoryWithPreserveOption-" + counter.getAndIncrement();
  final Path hdfsTestDir=new Path(testdir);
  try {
    fs=cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    // Source directory with ACLs (incl. a DEFAULT entry), sticky bit,
    // one child file, and two xattrs (user + trusted namespaces).
    Path srcDir=new Path(hdfsTestDir,"srcDir");
    fs.mkdirs(srcDir);
    fs.setAcl(srcDir,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
    fs.setPermission(srcDir,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
    Path srcFile=new Path(srcDir,"srcFile");
    fs.create(srcFile).close();
    // Snapshot the source attributes to compare each target against.
    FileStatus status=fs.getFileStatus(srcDir);
    final long mtime=status.getModificationTime();
    final long atime=status.getAccessTime();
    final String owner=status.getOwner();
    final String group=status.getGroup();
    final FsPermission perm=status.getPermission();
    fs.setXAttr(srcDir,USER_A1,USER_A1_VALUE);
    fs.setXAttr(srcDir,TRUSTED_A1,TRUSTED_A1_VALUE);
    shell=new FsShell(conf);
    // -p: timestamps/ownership/permission only; no xattrs, no ACLs.
    Path targetDir1=new Path(hdfsTestDir,"targetDir1");
    String[] argv=new String[]{"-cp","-p",srcDir.toUri().toString(),targetDir1.toUri().toString()};
    int ret=ToolRunner.run(shell,argv);
    assertEquals("cp -p is not working",SUCCESS,ret);
    FileStatus targetStatus=fs.getFileStatus(targetDir1);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    FsPermission targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    Map xattrs=fs.getXAttrs(targetDir1);
    assertTrue(xattrs.isEmpty());
    List acls=fs.getAclStatus(targetDir1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptop: same result as -p (t=timestamps, o=ownership, p=permission).
    Path targetDir2=new Path(hdfsTestDir,"targetDir2");
    argv=new String[]{"-cp","-ptop",srcDir.toUri().toString(),targetDir2.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptop is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(targetDir2);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(targetDir2);
    assertTrue(xattrs.isEmpty());
    acls=fs.getAclStatus(targetDir2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopx: additionally preserves both xattrs; still no ACLs.
    Path targetDir3=new Path(hdfsTestDir,"targetDir3");
    argv=new String[]{"-cp","-ptopx",srcDir.toUri().toString(),targetDir3.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptopx is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(targetDir3);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(targetDir3);
    assertEquals(xattrs.size(),2);
    assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
    acls=fs.getAclStatus(targetDir3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: additionally preserves ACLs (acl bit set); no xattrs.
    Path targetDir4=new Path(hdfsTestDir,"targetDir4");
    argv=new String[]{"-cp","-ptopa",srcDir.toUri().toString(),targetDir4.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptopa is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(targetDir4);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(targetDir4);
    assertTrue(xattrs.isEmpty());
    acls=fs.getAclStatus(targetDir4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir4));
    // -ptoa: ACLs preserved without explicitly naming 'p' (permission).
    Path targetDir5=new Path(hdfsTestDir,"targetDir5");
    argv=new String[]{"-cp","-ptoa",srcDir.toUri().toString(),targetDir5.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptoa is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(targetDir5);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(targetDir5);
    assertTrue(xattrs.isEmpty());
    acls=fs.getAclStatus(targetDir5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(srcDir),fs.getAclStatus(targetDir5));
  } finally {
    if (shell != null) {
      shell.close();
    }
    if (fs != null) {
      fs.delete(hdfsTestDir,true);
      fs.close();
    }
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that {@code -cp -p} preserves timestamps/ownership/permission
 * (including the sticky bit) but NOT ACLs, while {@code -cp -ptopa} also
 * carries the full ACL over to the target file.
 */
@Test(timeout=120000) public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
  Configuration conf=new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell=null;
  FileSystem fs=null;
  final String testdir="/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-" + counter.getAndIncrement();
  final Path hdfsTestDir=new Path(testdir);
  try {
    fs=cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    // Source file with ACL entries and the sticky bit set.
    Path src=new Path(hdfsTestDir,"srcfile");
    fs.create(src).close();
    fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
    fs.setPermission(src,new FsPermission(ALL,READ_EXECUTE,EXECUTE,true));
    // Snapshot the source attributes to compare each target against.
    FileStatus status=fs.getFileStatus(src);
    final long mtime=status.getModificationTime();
    final long atime=status.getAccessTime();
    final String owner=status.getOwner();
    final String group=status.getGroup();
    final FsPermission perm=status.getPermission();
    shell=new FsShell(conf);
    // -p: attributes preserved, ACL dropped (acl bit clear on target).
    Path target1=new Path(hdfsTestDir,"targetfile1");
    String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
    int ret=ToolRunner.run(shell,argv);
    assertEquals("cp is not working",SUCCESS,ret);
    FileStatus targetStatus=fs.getFileStatus(target1);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    FsPermission targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    List acls=fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: attributes AND ACL preserved (acl bit set on target).
    Path target2=new Path(hdfsTestDir,"targetfile2");
    argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target2.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptopa is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(target2);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    acls=fs.getAclStatus(target2).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src),fs.getAclStatus(target2));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir,true);
      fs.close();
    }
    cluster.shutdown();
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests {@code -getfattr} error paths: a user without read access must not
 * see the xattr value, and requesting a nonexistent attribute must print
 * the "attributes provided was not found" error.
 */
@Test(timeout=120000)
public void testGetFAttrErrors() throws Exception {
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[]{"mygroup"});
  MiniDFSCluster cluster = null;
  PrintStream bakErr = null;
  try {
    final Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final FileSystem fs = cluster.getFileSystem();
    final Path p = new Path("/foo");
    fs.mkdirs(p);
    // Capture stderr so the shell's error/output text can be asserted on.
    bakErr = System.err;
    final FsShell fshell = new FsShell(conf);
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setErr(new PrintStream(out));
    // Owner-only permissions so "user" cannot read the xattr below.
    fs.setPermission(p, new FsPermission((short) 0700));
    {
      // Set an xattr as the superuser; must succeed.
      final int ret = ToolRunner.run(fshell,
          new String[]{"-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
      assertEquals("Returned should be 0", 0, ret);
      out.reset();
    }
    // Parameterized (was a raw PrivilegedExceptionAction) to avoid the
    // unchecked-conversion warning; behavior is identical.
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        // The unprivileged user must not see the xattr value.
        int ret = ToolRunner.run(fshell,
            new String[]{"-getfattr", "-n", "user.a1", "/foo"});
        String str = out.toString();
        assertTrue("xattr value was incorrectly returned", str.indexOf("1234") == -1);
        out.reset();
        return null;
      }
    });
    {
      // Asking for a nonexistent attribute must produce the standard error.
      final int ret = ToolRunner.run(fshell,
          new String[]{"-getfattr", "-n", "user.nonexistent", "/foo"});
      String str = out.toString();
      assertTrue("xattr value was incorrectly returned",
          str.indexOf("getfattr: At least one of the attributes provided was not found") >= 0);
      out.reset();
    }
  } finally {
    // Always restore the real stderr and tear the cluster down.
    if (bakErr != null) {
      System.setErr(bakErr);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests {@code -cp} preserve flags on a FILE source: plain -p keeps
 * timestamps/owner/group/permission but drops xattrs and ACLs; -ptopx adds
 * xattrs; -ptopa and -ptoa add ACLs. Each targetN section below checks one
 * flag combination against the same source file.
 */
@Test(timeout=120000) public void testCopyCommandsWithPreserveOption() throws Exception {
  Configuration conf=new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY,true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
  FsShell shell=null;
  FileSystem fs=null;
  final String testdir="/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-" + counter.getAndIncrement();
  final Path hdfsTestDir=new Path(testdir);
  try {
    fs=cluster.getFileSystem();
    fs.mkdirs(hdfsTestDir);
    // Source file with ACL entries and two xattrs (user + trusted).
    Path src=new Path(hdfsTestDir,"srcfile");
    fs.create(src).close();
    fs.setAcl(src,Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"bar",READ_EXECUTE),aclEntry(ACCESS,OTHER,EXECUTE)));
    // Snapshot the source attributes to compare each target against.
    FileStatus status=fs.getFileStatus(src);
    final long mtime=status.getModificationTime();
    final long atime=status.getAccessTime();
    final String owner=status.getOwner();
    final String group=status.getGroup();
    final FsPermission perm=status.getPermission();
    fs.setXAttr(src,USER_A1,USER_A1_VALUE);
    fs.setXAttr(src,TRUSTED_A1,TRUSTED_A1_VALUE);
    shell=new FsShell(conf);
    // -p: timestamps/ownership/permission only; no xattrs, no ACLs.
    Path target1=new Path(hdfsTestDir,"targetfile1");
    String[] argv=new String[]{"-cp","-p",src.toUri().toString(),target1.toUri().toString()};
    int ret=ToolRunner.run(shell,argv);
    assertEquals("cp -p is not working",SUCCESS,ret);
    FileStatus targetStatus=fs.getFileStatus(target1);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    FsPermission targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    Map xattrs=fs.getXAttrs(target1);
    assertTrue(xattrs.isEmpty());
    List acls=fs.getAclStatus(target1).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptop: same result as -p (t=timestamps, o=ownership, p=permission).
    Path target2=new Path(hdfsTestDir,"targetfile2");
    argv=new String[]{"-cp","-ptop",src.toUri().toString(),target2.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptop is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(target2);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(target2);
    assertTrue(xattrs.isEmpty());
    acls=fs.getAclStatus(target2).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopx: additionally preserves both xattrs; still no ACLs.
    Path target3=new Path(hdfsTestDir,"targetfile3");
    argv=new String[]{"-cp","-ptopx",src.toUri().toString(),target3.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptopx is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(target3);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(target3);
    assertEquals(xattrs.size(),2);
    assertArrayEquals(USER_A1_VALUE,xattrs.get(USER_A1));
    assertArrayEquals(TRUSTED_A1_VALUE,xattrs.get(TRUSTED_A1));
    acls=fs.getAclStatus(target3).getEntries();
    assertTrue(acls.isEmpty());
    assertFalse(targetPerm.getAclBit());
    // -ptopa: additionally preserves ACLs (acl bit set); no xattrs.
    Path target4=new Path(hdfsTestDir,"targetfile4");
    argv=new String[]{"-cp","-ptopa",src.toUri().toString(),target4.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptopa is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(target4);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(target4);
    assertTrue(xattrs.isEmpty());
    acls=fs.getAclStatus(target4).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src),fs.getAclStatus(target4));
    // -ptoa: ACLs preserved without explicitly naming 'p' (permission).
    Path target5=new Path(hdfsTestDir,"targetfile5");
    argv=new String[]{"-cp","-ptoa",src.toUri().toString(),target5.toUri().toString()};
    ret=ToolRunner.run(shell,argv);
    assertEquals("cp -ptoa is not working",SUCCESS,ret);
    targetStatus=fs.getFileStatus(target5);
    assertEquals(mtime,targetStatus.getModificationTime());
    assertEquals(atime,targetStatus.getAccessTime());
    assertEquals(owner,targetStatus.getOwner());
    assertEquals(group,targetStatus.getGroup());
    targetPerm=targetStatus.getPermission();
    assertTrue(perm.equals(targetPerm));
    xattrs=fs.getXAttrs(target5);
    assertTrue(xattrs.isEmpty());
    acls=fs.getAclStatus(target5).getEntries();
    assertFalse(acls.isEmpty());
    assertTrue(targetPerm.getAclBit());
    assertEquals(fs.getAclStatus(src),fs.getAclStatus(target5));
  } finally {
    if (null != shell) {
      shell.close();
    }
    if (null != fs) {
      fs.delete(hdfsTestDir,true);
      fs.close();
    }
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestDFSStartupVersions

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This test ensures the appropriate response (successful or failure) from
 * a Datanode when the system is started with differing version combinations.
 * <p>
 * For each 3-tuple in the cross product
 * ({oldLayoutVersion, currentLayoutVersion, futureLayoutVersion},
 * {currentNamespaceId, incorrectNamespaceId},
 * {pastFsscTime, currentFsscTime, futureFsscTime}):
 * <ol>
 * <li>Startup Namenode with version file containing
 * (currentLayoutVersion, currentNamespaceId, currentFsscTime)</li>
 * <li>Attempt to startup Datanode with version file containing
 * this iteration's version 3-tuple</li>
 * </ol>
 */
@Test(timeout=300000) public void testVersions() throws Exception {
  UpgradeUtilities.initialize();
  Configuration conf=UpgradeUtilities.initializeStorageStateConf(1,new HdfsConfiguration());
  // All version 3-tuples to try against the fixed NameNode version.
  StorageData[] versions=initializeVersions();
  UpgradeUtilities.createNameNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY),"current");
  // NameNode only; DataNodes are started one at a time per iteration below.
  cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageDataDfsDirs(false).manageNameDfsDirs(false).startupOption(StartupOption.REGULAR).build();
  StorageData nameNodeVersion=new StorageData(HdfsConstants.NAMENODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),UpgradeUtilities.getCurrentBlockPoolID(cluster));
  log("NameNode version info",NAME_NODE,null,nameNodeVersion);
  String bpid=UpgradeUtilities.getCurrentBlockPoolID(cluster);
  for (int i=0; i < versions.length; i++) {
    // Fresh DataNode storage stamped with this iteration's version tuple.
    File[] storage=UpgradeUtilities.createDataNodeStorageDirs(conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY),"current");
    log("DataNode version info",DATA_NODE,i,versions[i]);
    UpgradeUtilities.createDataNodeVersionFile(storage,versions[i].storageInfo,bpid,versions[i].blockPoolId);
    try {
      cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
    } catch ( Exception ignore) {
      // Startup failure is an expected outcome for incompatible versions;
      // the outcome is checked via isDataNodeUp() below instead.
    }
    assertTrue(cluster.getNameNode() != null);
    // The DataNode must be up iff this version tuple is compatible.
    assertEquals(isVersionCompatible(nameNodeVersion,versions[i]),cluster.isDataNodeUp());
    cluster.shutdownDataNodes();
  }
}

Class: org.apache.hadoop.hdfs.TestDFSUpgrade

IterativeVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * This test attempts to upgrade the NameNode and DataNode under a number of
 * valid and invalid conditions. Each scenario is announced by a {@code log()}
 * call; the whole sequence runs for 1 and 2 storage directories, then a
 * final "normal upgrade" pass with 4 directories.
 */
@Test(timeout=60000) public void testUpgrade() throws Exception {
  File[] baseDirs;
  StorageInfo storageInfo=null;
  for (int numDirs=1; numDirs <= 2; numDirs++) {
    conf=new HdfsConfiguration();
    conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
    String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    String[] dataNodeDirs=conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    log("Normal NameNode upgrade",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    cluster=createCluster();
    // A rolling-upgrade PREPARE during a classic upgrade must be rejected
    // with InconsistentFSStateException.
    try {
      final DistributedFileSystem dfs=cluster.getFileSystem();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      fail();
    } catch ( RemoteException re) {
      assertEquals(InconsistentFSStateException.class.getName(),re.getClassName());
      LOG.info("The exception is expected.",re);
    }
    checkNameNode(nameNodeDirs,EXPECTED_TXID);
    if (numDirs > 1) TestParallelImageWrite.checkImages(cluster.getNamesystem(),numDirs);
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("Normal DataNode upgrade",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    cluster=createCluster();
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
    cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
    checkDataNode(dataNodeDirs,UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    // A leftover "previous" dir must block a NameNode upgrade.
    log("NameNode upgrade with existing previous dir",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"previous");
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    // ...but a DataNode tolerates its own leftover "previous" dir.
    log("DataNode upgrade with existing previous dir",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    cluster=createCluster();
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"previous");
    cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
    checkDataNode(dataNodeDirs,UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    // Integer.MIN_VALUE stands in for a layout version from the future.
    log("DataNode upgrade with future stored layout version in current",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    cluster=createCluster();
    baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
    storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),UpgradeUtilities.getCurrentFsscTime(cluster),NodeType.DATA_NODE);
    UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startBlockPoolShouldFail(StartupOption.REGULAR,UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    // Long.MAX_VALUE stands in for an fsscTime newer than the NameNode's.
    log("DataNode upgrade with newer fsscTime in current",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    cluster=createCluster();
    baseDirs=UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs,"current");
    storageInfo=new StorageInfo(HdfsConstants.DATANODE_LAYOUT_VERSION,UpgradeUtilities.getCurrentNamespaceID(cluster),UpgradeUtilities.getCurrentClusterID(cluster),Long.MAX_VALUE,NodeType.DATA_NODE);
    UpgradeUtilities.createDataNodeVersionFile(baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startBlockPoolShouldFail(StartupOption.REGULAR,UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    // Missing edits / missing image / corrupt VERSION file must all block
    // a NameNode upgrade.
    log("NameNode upgrade with no edits file",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    deleteStorageFilesWithPrefix(nameNodeDirs,"edits_");
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with no image file",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    deleteStorageFilesWithPrefix(nameNodeDirs,"fsimage_");
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with corrupt version file",numDirs);
    baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    for ( File f : baseDirs) {
      UpgradeUtilities.corruptFile(new File(f,"VERSION"),"layoutVersion".getBytes(Charsets.UTF_8),"xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
    }
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    // Layout versions outside the upgradable range must block the upgrade.
    log("NameNode upgrade with old layout version in current",numDirs);
    baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    storageInfo=new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
    UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with future layout version in current",numDirs);
    baseDirs=UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    storageInfo=new StorageInfo(Integer.MIN_VALUE,UpgradeUtilities.getCurrentNamespaceID(null),UpgradeUtilities.getCurrentClusterID(null),UpgradeUtilities.getCurrentFsscTime(null),NodeType.NAME_NODE);
    UpgradeUtilities.createNameNodeVersionFile(conf,baseDirs,storageInfo,UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  }
  // Final pass: normal NameNode upgrade with 4 storage directories and
  // the block scanner disabled.
  int numDirs=4;
  {
    conf=new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,-1);
    conf=UpgradeUtilities.initializeStorageStateConf(numDirs,conf);
    String[] nameNodeDirs=conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    log("Normal NameNode upgrade",numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs,"current");
    cluster=createCluster();
    // As above: rolling-upgrade PREPARE must be rejected mid-upgrade.
    try {
      final DistributedFileSystem dfs=cluster.getFileSystem();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      fail();
    } catch ( RemoteException re) {
      assertEquals(InconsistentFSStateException.class.getName(),re.getClassName());
      LOG.info("The exception is expected.",re);
    }
    checkNameNode(nameNodeDirs,EXPECTED_TXID);
    TestParallelImageWrite.checkImages(cluster.getNamesystem(),numDirs);
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  }
}

Class: org.apache.hadoop.hdfs.TestDFSUpgradeFromImage

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test upgrade from 2.0 image with a variety of .snapshot and .reserved * paths to test renaming on upgrade */ @Test public void testUpgradeFromRel2ReservedImage() throws Exception { unpackStorage(HADOOP2_RESERVED_IMAGE,HADOOP_DFS_DIR_TXT); MiniDFSCluster cluster=null; final Configuration conf=new Configuration(); try { cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build(); } catch ( IllegalArgumentException e) { GenericTestUtils.assertExceptionContains("reserved path component in this version",e); } finally { if (cluster != null) { cluster.shutdown(); } } try { FSImageFormat.setRenameReservedPairs(".snapshot=.user-snapshot," + ".reserved=.my-reserved"); cluster=new MiniDFSCluster.Builder(conf).format(false).startupOption(StartupOption.UPGRADE).numDataNodes(0).build(); DistributedFileSystem dfs=cluster.getFileSystem(); final String[] expected=new String[]{"/edits","/edits/.reserved","/edits/.user-snapshot","/edits/.user-snapshot/editsdir","/edits/.user-snapshot/editsdir/editscontents","/edits/.user-snapshot/editsdir/editsdir2","/image","/image/.reserved","/image/.user-snapshot","/image/.user-snapshot/imagedir","/image/.user-snapshot/imagedir/imagecontents","/image/.user-snapshot/imagedir/imagedir2","/.my-reserved","/.my-reserved/edits-touch","/.my-reserved/image-touch"}; for (int i=0; i < 2; i++) { if (i == 1) { cluster.finalizeCluster(conf); cluster.restartNameNode(true); } ArrayList toList=new ArrayList(); toList.add(new Path("/")); ArrayList found=new ArrayList(); while (!toList.isEmpty()) { Path p=toList.remove(0); FileStatus[] statuses=dfs.listStatus(p); for ( FileStatus status : statuses) { final String path=status.getPath().toUri().getPath(); System.out.println("Found path " + path); found.add(path); if (status.isDirectory()) { toList.add(status.getPath()); } } } for ( String s : expected) { assertTrue("Did not find expected path " + s,found.contains(s)); } assertEquals("Found an unexpected 
path while listing filesystem",found.size(),expected.length); } } finally { if (cluster != null) { cluster.shutdown(); } } }

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from a branch-1.2 image with reserved paths.
 */
@Test
public void testUpgradeFromRel1ReservedImage() throws Exception {
  unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
  MiniDFSCluster cluster = null;
  final Configuration conf = new Configuration();
  try {
    // Configure rename pairs so reserved names are migrated on upgrade.
    FSImageFormat.setRenameReservedPairs(
        ".snapshot=.user-snapshot," + ".reserved=.my-reserved");
    cluster = new MiniDFSCluster.Builder(conf)
        .format(false)
        .startupOption(StartupOption.UPGRADE)
        .numDataNodes(0)
        .build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Every path expected after the reserved components are renamed.
    final String[] expected = new String[] {
        "/.my-reserved",
        "/.user-snapshot",
        "/.user-snapshot/.user-snapshot",
        "/.user-snapshot/open",
        "/dir1",
        "/dir1/.user-snapshot",
        "/dir2",
        "/dir2/.user-snapshot",
        "/user",
        "/user/andrew",
        "/user/andrew/.user-snapshot"};
    // Pass 0 checks the upgraded namespace; pass 1 re-checks after
    // finalizing the upgrade and restarting the namenode.
    for (int i = 0; i < 2; i++) {
      if (i == 1) {
        cluster.finalizeCluster(conf);
        cluster.restartNameNode(true);
      }
      // Breadth-first traversal of the whole namespace.
      ArrayList<Path> toList = new ArrayList<Path>();
      toList.add(new Path("/"));
      ArrayList<String> found = new ArrayList<String>();
      while (!toList.isEmpty()) {
        Path p = toList.remove(0);
        FileStatus[] statuses = dfs.listStatus(p);
        for (FileStatus status : statuses) {
          final String path = status.getPath().toUri().getPath();
          System.out.println("Found path " + path);
          found.add(path);
          if (status.isDirectory()) {
            toList.add(status.getPath());
          }
        }
      }
      for (String s : expected) {
        assertTrue("Did not find expected path " + s, found.contains(s));
      }
      // Fix: expected/actual were previously passed in reverse order.
      assertEquals("Found an unexpected path while listing filesystem",
          expected.length, found.size());
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from a 0.23.11 image with reserved paths.
 */
@Test
public void testUpgradeFromRel023ReservedImage() throws Exception {
  unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
  MiniDFSCluster cluster = null;
  final Configuration conf = new Configuration();
  try {
    // Configure rename pairs so reserved names are migrated on upgrade.
    FSImageFormat.setRenameReservedPairs(
        ".snapshot=.user-snapshot," + ".reserved=.my-reserved");
    cluster = new MiniDFSCluster.Builder(conf)
        .format(false)
        .startupOption(StartupOption.UPGRADE)
        .numDataNodes(0)
        .build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Every path expected after the reserved components are renamed.
    final String[] expected = new String[] {
        "/.user-snapshot",
        "/dir1",
        "/dir1/.user-snapshot",
        "/dir2",
        "/dir2/.user-snapshot"};
    // Pass 0 checks the upgraded namespace; pass 1 re-checks after
    // finalizing the upgrade and restarting the namenode.
    for (int i = 0; i < 2; i++) {
      if (i == 1) {
        cluster.finalizeCluster(conf);
        cluster.restartNameNode(true);
      }
      // Breadth-first traversal of the whole namespace.
      ArrayList<Path> toList = new ArrayList<Path>();
      toList.add(new Path("/"));
      ArrayList<String> found = new ArrayList<String>();
      while (!toList.isEmpty()) {
        Path p = toList.remove(0);
        FileStatus[] statuses = dfs.listStatus(p);
        for (FileStatus status : statuses) {
          final String path = status.getPath().toUri().getPath();
          System.out.println("Found path " + path);
          found.add(path);
          if (status.isDirectory()) {
            toList.add(status.getPath());
          }
        }
      }
      for (String s : expected) {
        assertTrue("Did not find expected path " + s, found.contains(s));
      }
      // Fix: expected/actual were previously passed in reverse order.
      assertEquals("Found an unexpected path while listing filesystem",
          expected.length, found.size());
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test upgrade from 0.22 image with corrupt md5, make sure it
 * fails to upgrade.
 */
@Test
public void testUpgradeFromCorruptRel22Image() throws IOException {
  unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
  // Corrupt the recorded image digest in both name directories so the
  // stored fsimage no longer matches its MD5.
  File baseDir = new File(MiniDFSCluster.getBaseDirectory());
  FSImageTestUtil.corruptVersionFile(
      new File(baseDir, "name1/current/VERSION"),
      "imageMD5Digest", "22222222222222222222222222222222");
  FSImageTestUtil.corruptVersionFile(
      new File(baseDir, "name2/current/VERSION"),
      "imageMD5Digest", "22222222222222222222222222222222");
  // Attach a capturing appender so the MD5 failure logs can be counted.
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  try {
    upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).numDataNodes(4));
    fail("Upgrade did not fail with bad MD5");
  } catch (IOException ioe) {
    String msg = StringUtils.stringifyException(ioe);
    if (!msg.contains("Failed to load an FSImage file")) {
      throw ioe;
    }
    int md5failures = appender.countExceptionsWithMessage(
        " is corrupt with MD5 checksum of ");
    assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
  } finally {
    // Fix: detach the appender so it does not leak into other tests that
    // share the root logger.
    logger.removeAppender(appender);
  }
}

Class: org.apache.hadoop.hdfs.TestDFSUtil

EqualityVerifier 
/**
 * Verifies DFSUtil.parseRelativeTime: invalid inputs are rejected with a
 * descriptive IOException, and valid s/m/h/d suffixes are converted to
 * milliseconds.
 */
@Test(timeout=5000)
public void testRelativeTimeConversion() throws Exception {
  // Fix: each negative case now has a fail() guard — previously a
  // non-throwing parseRelativeTime passed the test silently.
  try {
    DFSUtil.parseRelativeTime("1");
    fail("Expected IOException for an input that is too short");
  } catch (IOException e) {
    assertExceptionContains("too short", e);
  }
  try {
    DFSUtil.parseRelativeTime("1z");
    fail("Expected IOException for an unknown time unit");
  } catch (IOException e) {
    assertExceptionContains("unknown time unit", e);
  }
  try {
    DFSUtil.parseRelativeTime("yyz");
    fail("Expected IOException for a non-numeric value");
  } catch (IOException e) {
    assertExceptionContains("is not a number", e);
  }
  // Valid conversions: seconds, minutes, hours and days to milliseconds.
  assertEquals(61 * 1000, DFSUtil.parseRelativeTime("61s"));
  assertEquals(61 * 60 * 1000, DFSUtil.parseRelativeTime("61m"));
  assertEquals(0, DFSUtil.parseRelativeTime("0s"));
  assertEquals(25 * 60 * 60 * 1000, DFSUtil.parseRelativeTime("25h"));
  assertEquals(4 * 24 * 60 * 60 * 1000l, DFSUtil.parseRelativeTime("4d"));
  assertEquals(999 * 24 * 60 * 60 * 1000l, DFSUtil.parseRelativeTime("999d"));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that DFSUtil.getPassword resolves the HTTPS key, keystore and
 * truststore passwords through the configured CredentialProvider, and
 * returns null for an alias that was never stored.
 */
@Test
public void testGetPassword() throws Exception {
  File testDir = new File(System.getProperty("test.build.data", "target/test-dir"));
  Configuration conf = new Configuration();
  // Point the credential provider at a fresh local JKS keystore.
  final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir + "/test.jks";
  File file = new File(testDir, "test.jks");
  file.delete(); // start from a clean keystore on every run
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, ourUrl);
  CredentialProvider provider = CredentialProviderFactory.getProviders(conf).get(0);
  char[] keypass = {'k', 'e', 'y', 'p', 'a', 's', 's'};
  char[] storepass = {'s', 't', 'o', 'r', 'e', 'p', 'a', 's', 's'};
  char[] trustpass = {'t', 'r', 'u', 's', 't', 'p', 'a', 's', 's'};
  // Nothing stored yet: every alias resolves to null.
  assertEquals(null, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  assertEquals(null, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
  assertEquals(null, provider.getCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
  try {
    // Store all three credentials and persist them to the keystore.
    provider.createCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY, keypass);
    provider.createCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY, storepass);
    provider.createCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY, trustpass);
    provider.flush();
  } catch (Exception e) {
    e.printStackTrace();
    throw e;
  }
  // The provider returns exactly the char arrays that were stored...
  assertArrayEquals(keypass, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYPASSWORD_KEY).getCredential());
  assertArrayEquals(storepass, provider.getCredentialEntry(DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY).getCredential());
  assertArrayEquals(trustpass, provider.getCredentialEntry(DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
  // ...and DFSUtil.getPassword surfaces each as a String; an unknown
  // alias yields null.
  Assert.assertEquals("keypass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
  Assert.assertEquals("storepass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
  Assert.assertEquals("trustpass", DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
  Assert.assertEquals(null, DFSUtil.getPassword(conf, "invalid-alias"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA
 * nor Federation is enabled.
 * Regression test for HDFS-3351.
 */
@Test
public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final String expectedDefaultFs = "hdfs://localhost:1234";
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
  // Sanity check: fs.defaultFS has not been derived from the RPC
  // address yet.
  assertFalse(expectedDefaultFs.equals(conf.get(FS_DEFAULT_NAME_KEY)));
  // No nameservice id and no namenode id configured.
  NameNode.initializeGenericKeys(conf, null, null);
  // initializeGenericKeys must have populated fs.defaultFS from the
  // RPC address.
  assertEquals(expectedDefaultFs, conf.get(FS_DEFAULT_NAME_KEY));
}

InternalCallVerifier EqualityVerifier 
/**
 * Regression test for HDFS-2934.
 */
@Test
public void testSomeConfsNNSpecificSomeNSSpecific() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  // Layer three levels of the same key: global default, per-nameservice
  // (ns1), and per-namenode (ns1.nn1).
  String key = DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
  conf.set(key, "global-default");
  conf.set(key + ".ns1", "ns1-override");
  conf.set(key + ".ns1.nn1", "nn1-override");
  // A different nameservice only sees the global default...
  Configuration newConf = new Configuration(conf);
  NameNode.initializeGenericKeys(newConf, "ns2", "nn1");
  assertEquals("global-default", newConf.get(key));
  // ...with or without a namenode id.
  newConf = new Configuration(conf);
  NameNode.initializeGenericKeys(newConf, "ns2", null);
  assertEquals("global-default", newConf.get(key));
  // ns1 with a namenode id that has no override falls back to the
  // nameservice-level value.
  newConf = new Configuration(conf);
  NameNode.initializeGenericKeys(newConf, "ns1", "nn2");
  assertEquals("ns1-override", newConf.get(key));
  // The fully-qualified ns1.nn1 override wins when both ids match.
  newConf = new Configuration(conf);
  NameNode.initializeGenericKeys(newConf, "ns1", "nn1");
  assertEquals("nn1-override", newConf.get(key));
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies DFSUtil.getSpnegoKeytabKey: the caller-supplied default key is
 * returned when the spnego keytab key is unset or empty; otherwise the
 * spnego keytab key itself is returned.
 */
@Test(timeout=5000)
public void testGetSpnegoKeytabKey() {
  HdfsConfiguration conf = new HdfsConfiguration();
  String defaultKey = "default.spengo.key";
  // Key not present at all -> default key.
  conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
  assertEquals("Test spnego key in config is null", defaultKey,
      DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
  // Key present but empty -> still the default key.
  conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
  assertEquals("Test spnego key is empty", defaultKey,
      DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
  // Key present with a value -> the spnego keytab key name is returned.
  String spengoKey = "spengo.key";
  conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, spengoKey);
  assertEquals("Test spnego key is NOT null",
      DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
      DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies DFSUtil.getNameServiceUris across mixed HA / federated /
 * standalone configurations: HA nameservices collapse to their logical
 * URI, non-HDFS default filesystems are excluded, duplicates are not
 * double-counted, and the service-rpc address wins for single namenodes.
 */
@Test
public void testGetNNUris() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
  final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
  final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
  final String NN1_ADDR = "nn.example.com:8020";
  final String NN1_SRVC_ADDR = "nn.example.com:8021";
  final String NN2_ADDR = "nn2.example.com:8020";
  // ns1 is HA (two namenodes); ns2 only has a service-rpc address.
  conf.set(DFS_NAMESERVICES, "ns1,ns2");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"), NS2_NN_ADDR);
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
  // HA ns1 collapses to its logical URI; standalone addresses appear
  // individually. (Fix: raw Collection replaced with Collection<URI>.)
  Collection<URI> uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(4, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)));
  // A non-HDFS default FS contributes no URI.
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "viewfs://vfs-name.example.com");
  uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(3, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
  // A default FS duplicating an existing nameservice URI is not
  // double-counted.
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
  uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(3, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
  assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)));
  // Single namenode: the service-rpc address wins over both the client
  // RPC address and the default FS.
  conf = new HdfsConfiguration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR);
  conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
  uris = DFSUtil.getNameServiceUris(conf,
      DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(1, uris.size());
  assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
}

InternalCallVerifier EqualityVerifier 
/**
 * Test to ensure nameservice specific keys in the configuration are
 * copied to generic keys when the namenode starts.
 */
@Test
public void testConfModificationFederationAndHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  String nsId = "ns1";
  String nnId = "nn1";
  conf.set(DFS_NAMESERVICES, nsId);
  conf.set(DFS_NAMESERVICE_ID, nsId);
  conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
  // Set every namenode-specific key with the ns/nn suffix; the value is
  // the key name itself so the copy can be verified below.
  for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
    conf.set(DFSUtil.addKeySuffixes(key, nsId, nnId), key);
  }
  NameNode.initializeGenericKeys(conf, nsId, nnId);
  // Each suffixed value must now also be visible under the generic key.
  for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
    assertEquals(key, conf.get(key));
  }
}

APIUtilityVerifier EqualityVerifier 
/**
 * Verifies DFSUtil.getInfoServer for http and https with and without an
 * explicit RPC address.
 */
@Test
public void testGetInfoServer() throws IOException, URISyntaxException {
  HdfsConfiguration conf = new HdfsConfiguration();
  // With no address configured, the wildcard host and the scheme's
  // default port are used.
  URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
  assertEquals(new URI("https", null, "0.0.0.0",
      DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
  URI httpport = DFSUtil.getInfoServer(null, conf, "http");
  assertEquals(new URI("http", null, "0.0.0.0",
      DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
  // When an RPC address is supplied, its host is reused with the
  // default HTTP port.
  URI httpAddress = DFSUtil.getInfoServer(
      new InetSocketAddress("localhost", 8020), conf, "http");
  assertEquals(
      URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
      httpAddress);
}

EqualityVerifier 
/**
 * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
 * nameserviceId for namenode is determined based on matching the address with
 * local node's address
 */
@Test
public void getNameNodeNameServiceId() {
  // setupAddress (helper defined elsewhere in this class) configures the
  // RPC address key for nameservice "nn1" against the local address.
  Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}

APIUtilityVerifier EqualityVerifier 
/**
 * Verifies DFSUtil.getHaNnWebHdfsAddresses resolves both namenodes of an
 * HA-configured logical host for the webhdfs scheme.
 */
@Test
public void testGetHaNnHttpAddresses() throws IOException {
  final String LOGICAL_HOST_NAME = "ns1";
  final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
  final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
  Configuration conf = createWebHDFSHAConfiguration(
      LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
  // Map of nameservice id -> (namenode id -> resolved address).
  // Fix: generic type parameters were stripped ("Map>" is invalid Java).
  Map<String, Map<String, InetSocketAddress>> map =
      DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
  assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
  assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests to ensure default namenode is used as fallback
 */
@Test
public void testDefaultNamenode() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String hdfs_default = "hdfs://localhost:9999/";
  conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
  // No nameservices configured: the default FS becomes the single entry,
  // keyed by a null nameservice id and null namenode id.
  // Fix: generic type parameters were stripped ("Map>" is invalid Java).
  Map<String, Map<String, InetSocketAddress>> addrMap =
      DFSUtil.getNNServiceRpcAddresses(conf);
  assertEquals(1, addrMap.size());
  Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
  assertEquals(1, defaultNsMap.size());
  assertEquals(9999, defaultNsMap.get(null).getPort());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getOnlyNameServiceIdOrNull returns the nameservice id only when exactly
 * one nameservice is configured, otherwise null.
 */
@Test
public void testGetOnlyNameServiceIdOrNull() {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Two nameservices: ambiguous, so null.
  conf.set(DFS_NAMESERVICES, "ns1,ns2");
  assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
  // None configured: also null.
  conf.set(DFS_NAMESERVICES, "");
  assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
  // Exactly one: returned.
  conf.set(DFS_NAMESERVICES, "ns1");
  assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies DFSUtil.getNamenodeServiceAddr: the plain RPC address is used
 * until a service-rpc address is configured, which then takes precedence.
 */
@Test
public void getNameNodeServiceAddr() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  // NOTE(review): NS1_NN2_HOST repeats the nn1 host and NS1_NN1_HOST_SVC
  // uses the nn2 host — these look like copy-paste slips in the fixture
  // data; the assertions below are self-consistent either way. Confirm
  // the intended values.
  final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
  final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:8021";
  final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
  final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:8021";
  conf.set(DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST);
  // No service-rpc address configured yet: plain RPC address is returned.
  assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
  assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
  assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
  // Unknown nameservice id yields null.
  assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
  // Once service-rpc addresses are configured, they take precedence.
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST_SVC);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_HOST_SVC);
  assertEquals(NS1_NN1_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn1"));
  assertEquals(NS1_NN2_HOST_SVC, DFSUtil.getNamenodeServiceAddr(conf, null, "nn2"));
  assertEquals("ns1", DFSUtil.getNamenodeNameServiceId(conf));
  assertEquals("ns1", DFSUtil.getSecondaryNameServiceId(conf));
}

EqualityVerifier 
/**
 * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure
 * nameserviceId for backup node is determined based on matching the address
 * with local node's address
 */
@Test
public void getSecondaryNameServiceId() {
  // setupAddress (helper defined elsewhere in this class) configures the
  // secondary HTTP address key for nameservice "nn1".
  Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
  assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
}

InternalCallVerifier EqualityVerifier 
/**
 * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
 * nameserviceId from the configuration returned
 */
@Test
public void getNameServiceId() {
  HdfsConfiguration conf = new HdfsConfiguration();
  // An explicitly configured nameservice id is returned directly.
  conf.set(DFS_NAMESERVICE_ID, "nn1");
  assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test {@link DFSUtil#getNameServiceIds(Configuration)}
 */
@Test
public void testGetNameServiceIds() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  // Ids come back in configuration order.
  // Fix: raw Collection/Iterator replaced with the parameterized types.
  Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
  Iterator<String> it = nameserviceIds.iterator();
  assertEquals(2, nameserviceIds.size());
  assertEquals("nn1", it.next().toString());
  assertEquals("nn2", it.next().toString());
}

EqualityVerifier 
/**
 * substituteForWildcardAddress replaces a 0.0.0.0 bind host with the
 * supplied hostname but leaves a concrete address untouched.
 */
@Test
public void testSubstituteForWildcardAddress() throws IOException {
  assertEquals("foo:12345",
      DFSUtil.substituteForWildcardAddress("0.0.0.0:12345", "foo"));
  assertEquals("127.0.0.1:12345",
      DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo"));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies HA detection, RPC address resolution and logical URI
 * generation for a federated deployment with two HA nameservices.
 */
@Test
public void testHANameNodesWithFederation() throws URISyntaxException {
  HdfsConfiguration conf = new HdfsConfiguration();
  final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
  final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
  final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
  final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
  // Two federated nameservices, each with two HA namenodes.
  conf.set(DFS_NAMESERVICES, "ns1,ns2");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "ns1-nn1,ns1-nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"), "ns2-nn1,ns2-nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn1"), NS1_NN1_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn2"), NS1_NN2_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"), NS2_NN1_HOST);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"), NS2_NN2_HOST);
  // Fix: generic type parameters were stripped ("Map>" is invalid Java).
  Map<String, Map<String, InetSocketAddress>> map =
      DFSUtil.getHaNnRpcAddresses(conf);
  // Both configured nameservices are HA; an unknown one is not.
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
  assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
  assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
  assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
  assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
  assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
  assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
  assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
  assertEquals(NS1_NN2_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
  assertEquals(NS2_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
  // Without a nameservice id there is no unambiguous answer.
  assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, null, "ns1-nn1"));
  assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
  assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf));
  // Each HA nameservice is represented by a single logical URI.
  Collection<URI> uris =
      DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
  assertEquals(2, uris.size());
  assertTrue(uris.contains(new URI("hdfs://ns1")));
  assertTrue(uris.contains(new URI("hdfs://ns2")));
}

EqualityVerifier 
/**
 * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure
 * nameserviceId for backup node is determined based on matching the address
 * with local node's address
 */
@Test
public void getBackupNameServiceId() {
  // setupAddress (helper defined elsewhere in this class) configures the
  // backup address key for nameservice "nn1".
  Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
  assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
}

APIUtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
/**
 * A 127.0.0.1 default FS must be reverse-resolved to a hostname by
 * getNameServiceUris (skipped on Windows, where the reverse lookup is
 * not reliable).
 */
@Test(timeout=15000)
public void testLocalhostReverseLookup() {
  Assume.assumeTrue(!Shell.WINDOWS);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
  // Fix: the raw Collection did not compile with the URI-typed for-each
  // below; restored the stripped Collection<URI> type parameter.
  Collection<URI> uris = DFSUtil.getNameServiceUris(conf);
  assertEquals(1, uris.size());
  for (URI uri : uris) {
    // The loopback literal must have been replaced by a hostname.
    assertThat(uri.getHost(), not("127.0.0.1"));
  }
}

EqualityVerifier 
/**
 * durationToString renders a millisecond duration as DDD:HH:MM:SS.mmm,
 * with a leading '-' for negative durations.
 */
@Test(timeout=1000)
public void testDurationToString() throws Exception {
  assertEquals("000:00:00:00.000", DFSUtil.durationToString(0));
  // 1 day + 1 hour + 1 minute + 1 second
  assertEquals("001:01:01:01.000",
      DFSUtil.durationToString(((24 * 60 * 60) + (60 * 60) + (60) + 1) * 1000));
  // Just under a day, with a millisecond remainder.
  assertEquals("000:23:59:59.999",
      DFSUtil.durationToString(((23 * 60 * 60) + (59 * 60) + (59)) * 1000 + 999));
  // Negative durations carry a leading minus sign.
  assertEquals("-001:01:01:01.000",
      DFSUtil.durationToString(-((24 * 60 * 60) + (60 * 60) + (60) + 1) * 1000));
  assertEquals("-000:23:59:59.574",
      DFSUtil.durationToString(-(((23 * 60 * 60) + (59 * 60) + (59)) * 1000 + 574)));
}

InternalCallVerifier EqualityVerifier 
/**
 * Test to ensure nameservice specific keys in the configuration are
 * copied to generic keys when the namenode starts.
 */
@Test
public void testConfModificationFederationOnly() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  String nsId = "ns1";
  conf.set(DFS_NAMESERVICES, nsId);
  conf.set(DFS_NAMESERVICE_ID, nsId);
  // Set every namenode-specific key with only the nameservice suffix
  // (federation without HA); the value is the key name itself so the
  // copy can be verified below.
  for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
    conf.set(DFSUtil.addKeySuffixes(key, nsId), key);
  }
  NameNode.initializeGenericKeys(conf, nsId, null);
  // Each suffixed value must now also be visible under the generic key.
  for (String key : NameNode.NAMENODE_SPECIFIC_KEYS) {
    assertEquals(key, conf.get(key));
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test conversion of LocatedBlock to BlockLocation
 */
@Test
public void testLocatedBlocks2Locations() {
  DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[1];
  ds[0] = d;
  // One healthy block and one flagged as corrupt.
  ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
  ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
  // Fix: raw List replaced with the parameterized type.
  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
  // Fix: assertTrue(x == n) replaced with assertEquals for better
  // failure reporting.
  assertEquals("expected 2 blocks but got " + bs.length, 2, bs.length);
  // Exactly one of the converted locations must carry the corrupt flag.
  int corruptCount = 0;
  for (BlockLocation b : bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }
  assertEquals("expected 1 corrupt files but got " + corruptCount,
      1, corruptCount);
  // An empty LocatedBlocks must yield an empty location array.
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}{@link DFSUtil#getNameServiceIdFromAddress(Configuration,InetSocketAddress,String)(Configuration)}
 */
@Test
public void testMultipleNamenodes() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  final String NN1_ADDRESS = "localhost:9000";
  final String NN2_ADDRESS = "localhost:9001";
  final String NN3_ADDRESS = "localhost:9002";
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
  // One entry per configured nameservice.
  // Fix: generic type parameters were stripped ("Map>" is invalid Java).
  Map<String, Map<String, InetSocketAddress>> nnMap =
      DFSUtil.getNNServiceRpcAddresses(conf);
  assertEquals(2, nnMap.size());
  Map<String, InetSocketAddress> nn1Map = nnMap.get("nn1");
  assertEquals(1, nn1Map.size());
  InetSocketAddress addr = nn1Map.get(null);
  assertEquals("localhost", addr.getHostName());
  assertEquals(9000, addr.getPort());
  Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
  assertEquals(1, nn2Map.size());
  addr = nn2Map.get(null);
  assertEquals("localhost", addr.getHostName());
  assertEquals(9001, addr.getPort());
  // Reverse lookup: address -> nameservice id; unknown address -> null.
  checkNameServiceId(conf, NN1_ADDRESS, "nn1");
  checkNameServiceId(conf, NN2_ADDRESS, "nn2");
  checkNameServiceId(conf, NN3_ADDRESS, null);
  // Plain federated nameservices are not HA.
  assertFalse(HAUtil.isHAEnabled(conf, "nn1"));
  assertFalse(HAUtil.isHAEnabled(conf, "nn2"));
}

Class: org.apache.hadoop.hdfs.TestDataTransferKeepalive

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that the client respects its keepalive timeout.
 */
@Test(timeout=30000)
public void testClientResponsesKeepAliveTimeout() throws Exception {
  Configuration clientConf = new Configuration(conf);
  // Very short client-side cache expiry so the cached peer ages out
  // quickly during the test.
  final long CLIENT_EXPIRY_MS = 10L;
  clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
  // Distinct client context so this test gets its own peer cache.
  clientConf.set(DFS_CLIENT_CONTEXT, "testClientResponsesKeepAliveTimeout");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
  // Writing does not populate the read peer cache.
  assertEquals(0, peerCache.size());
  assertXceiverCount(0);
  // A read caches one peer and leaves one xceiver on the datanode.
  DFSTestUtil.readFile(fs, TEST_FILE);
  assertEquals(1, peerCache.size());
  assertXceiverCount(1);
  // Let the cached peer expire; the cache must not hand it out again and
  // must be empty afterwards.
  Thread.sleep(CLIENT_EXPIRY_MS + 1);
  Peer peer = peerCache.get(dn.getDatanodeId(), false);
  assertTrue(peer == null);
  assertEquals(0, peerCache.size());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Regression test for HDFS-3357. Check that the datanode is respecting
 * its configured keepalive timeout.
 */
@Test(timeout=30000)
public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
  Configuration clientConf = new Configuration(conf);
  // Client-side expiry much longer than the datanode keepalive, so only
  // the datanode side times the connection out.
  final long CLIENT_EXPIRY_MS = 60000L;
  clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
  // Distinct client context so this test gets its own peer cache.
  clientConf.set(DFS_CLIENT_CONTEXT, "testDatanodeRespectsKeepAliveTimeout");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
  // Writing does not populate the read peer cache.
  assertEquals(0, peerCache.size());
  assertXceiverCount(0);
  // A read caches one peer and leaves one xceiver on the datanode.
  DFSTestUtil.readFile(fs, TEST_FILE);
  assertEquals(1, peerCache.size());
  assertXceiverCount(1);
  // After the datanode keepalive elapses the server hangs up (no
  // xceivers), but the client still holds the now-stale cached peer.
  Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1);
  assertXceiverCount(0);
  assertEquals(1, peerCache.size());
  // Fetching the stale peer yields an immediate EOF on read.
  Peer peer = peerCache.get(dn.getDatanodeId(), false);
  assertNotNull(peer);
  assertEquals(-1, peer.getInputStream().read());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Opening several streams concurrently leaves one cached peer apiece, and
 * stale (server-closed) cached peers must not prevent a later read.
 */
@Test(timeout=30000)
public void testManyClosedSocketsInCache() throws Exception {
  Configuration clientConf = new Configuration(conf);
  // Distinct client context so this test gets its own peer cache.
  clientConf.set(DFS_CLIENT_CONTEXT, "testManyClosedSocketsInCache");
  DistributedFileSystem fs =
      (DistributedFileSystem)FileSystem.get(cluster.getURI(), clientConf);
  PeerCache peerCache = ClientContext.getFromConf(clientConf).getPeerCache();
  DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
  // Open five streams at once so five distinct connections are made,
  // then drain and close them all.
  InputStream[] stms = new InputStream[5];
  try {
    for (int i = 0; i < stms.length; i++) {
      stms[i] = fs.open(TEST_FILE);
    }
    for (InputStream stm : stms) {
      IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
    }
  } finally {
    IOUtils.cleanup(null, stms);
  }
  // All five peers went back into the cache.
  assertEquals(5, peerCache.size());
  // Let the datanode side time the sockets out; the client cache still
  // holds all five (now dead) peers.
  Thread.sleep(1500);
  assertXceiverCount(0);
  assertEquals(5, peerCache.size());
  // A fresh read must still succeed despite the dead cached peers.
  DFSTestUtil.readFile(fs, TEST_FILE);
}

Class: org.apache.hadoop.hdfs.TestDataTransferProtocol

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a PacketHeader through both of its serialization paths
 * (stream and ByteBuffer) and exercises the sanityCheck boundary.
 */
@Test
public void testPacketHeader() throws IOException {
  final PacketHeader hdr = new PacketHeader(4, 1024, 100, false, 4096, false);
  // Serialize via the DataOutput path.
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  hdr.write(new DataOutputStream(baos));
  // Deserialize via the stream path and compare.
  PacketHeader readBack = new PacketHeader();
  readBack.readFields(
      new DataInputStream(new ByteArrayInputStream(baos.toByteArray())));
  assertEquals(hdr, readBack);
  // Deserialize the same bytes via the ByteBuffer path and compare.
  readBack = new PacketHeader();
  readBack.readFields(ByteBuffer.wrap(baos.toByteArray()));
  assertEquals(hdr, readBack);
  // seqno is 100: a last-seen seqno of 99 is plausible, 100 is not.
  assertTrue(hdr.sanityCheck(99));
  assertFalse(hdr.sanityCheck(100));
}

Class: org.apache.hadoop.hdfs.TestDatanodeBlockScanner

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that an already-verified block is not re-scanned by the block
 * scanner: neither while new files keep arriving nor after a datanode
 * restart.
 */
@Test public void testDuplicateScans() throws Exception {
  long startTime=Time.monotonicNow();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
  FileSystem fs=null;
  try {
    fs=cluster.getFileSystem();
    DataNode dataNode=cluster.getDataNodes().get(0);
    int infoPort=dataNode.getInfoPort();
    long scanTimeBefore=0, scanTimeAfter=0;
    // Create files one by one; after each new file is verified, the
    // previous file's latest scan time must be unchanged (no duplicate
    // scan of an already-verified block).
    for (int i=1; i < 10; i++) {
      Path fileName=new Path("/test" + i);
      DFSTestUtil.createFile(fs,fileName,1024,(short)1,1000L);
      waitForVerification(infoPort,fs,fileName,i,startTime,TIMEOUT);
      if (i > 1) {
        scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (i - 1))));
        // Fixed typo in the assertion message ("shoud" -> "should").
        assertFalse("scan time should not be 0",scanTimeAfter == 0);
        assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
      }
      scanTimeBefore=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + i)));
    }
    // A datanode restart must not trigger a re-scan of verified blocks.
    cluster.restartDataNode(0);
    Thread.sleep(10000);
    dataNode=cluster.getDataNodes().get(0);
    scanTimeAfter=DataNodeTestUtils.getLatestScanTime(dataNode,DFSTestUtil.getFirstBlock(fs,new Path("/test" + (9))));
    assertEquals("There should not be duplicate scan",scanTimeBefore,scanTimeAfter);
  } finally {
    IOUtils.closeStream(fs);
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestDatanodeRegistration

BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Regression test for HDFS-894 ensures that, when datanodes
 * are restarted, the new IPC port is registered with the
 * namenode.
 */
@Test public void testChangeIpcPort() throws Exception {
  HdfsConfiguration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).build();
    InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
    DFSClient client=new DFSClient(addr,conf);
    // Restart the datanodes; they come back on fresh ports.
    cluster.restartDataNodes();
    // The NN report may still reflect the pre-restart registration until a
    // heartbeat arrives; poll (with growing sleeps) for a newer lastUpdate.
    DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL);
    long firstUpdateAfterRestart=report[0].getLastUpdate();
    boolean gotHeartbeat=false;
    for (int i=0; i < 10 && !gotHeartbeat; i++) {
      try {
        Thread.sleep(i * 1000);
      } catch (InterruptedException ie) {
      }
      report=client.datanodeReport(DatanodeReportType.ALL);
      gotHeartbeat=(report[0].getLastUpdate() > firstUpdateAfterRestart);
    }
    if (!gotHeartbeat) {
      fail("Never got a heartbeat from restarted datanode.");
    }
    // The IPC port the NN reports must match the DN's actual new port.
    int realIpcPort=cluster.getDataNodes().get(0).getIpcPort();
    assertEquals(realIpcPort,report[0].getIpcPort());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier EqualityVerifier 
// Registers a fake datanode over RPC, then re-registers it with a different
// storage ID, and verifies the namenode treats it as the same node rather
// than counting it twice.
@Test public void testChangeStorageID() throws Exception {
  final String DN_IP_ADDR="127.0.0.1";
  final String DN_HOSTNAME="localhost";
  final int DN_XFER_PORT=12345;
  final int DN_INFO_PORT=12346;
  final int DN_INFO_SECURE_PORT=12347;
  final int DN_IPC_PORT=12348;
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  try {
    // No real datanodes: registrations are injected directly via RPC.
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
    DFSClient client=new DFSClient(addr,conf);
    NamenodeProtocols rpcServer=cluster.getNameNodeRpc();
    DatanodeID dnId=new DatanodeID(DN_IP_ADDR,DN_HOSTNAME,"fake-datanode-id",DN_XFER_PORT,DN_INFO_PORT,DN_INFO_SECURE_PORT,DN_IPC_PORT);
    long nnCTime=cluster.getNamesystem().getFSImage().getStorage().getCTime();
    // Mock storage info matching the NN's cTime and layout version so the
    // registration passes the version checks.
    StorageInfo mockStorageInfo=mock(StorageInfo.class);
    doReturn(nnCTime).when(mockStorageInfo).getCTime();
    doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo).getLayoutVersion();
    DatanodeRegistration dnReg=new DatanodeRegistration(dnId,mockStorageInfo,null,VersionInfo.getVersion());
    rpcServer.registerDatanode(dnReg);
    DatanodeInfo[] report=client.datanodeReport(DatanodeReportType.ALL);
    assertEquals("Expected a registered datanode",1,report.length);
    // Same host/ports, different storage ID: must still count as one node.
    dnId=new DatanodeID(DN_IP_ADDR,DN_HOSTNAME,"changed-fake-datanode-id",DN_XFER_PORT,DN_INFO_PORT,DN_INFO_SECURE_PORT,DN_IPC_PORT);
    dnReg=new DatanodeRegistration(dnId,mockStorageInfo,null,VersionInfo.getVersion());
    rpcServer.registerDatanode(dnReg);
    report=client.datanodeReport(DatanodeReportType.ALL);
    assertEquals("Datanode with changed storage ID not recognized",1,report.length);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Ensure the datanode manager does not do host lookup after registration,
 * especially for node reports.
 * @throws Exception
 */
@Test public void testDNSLookups() throws Exception {
  // Security manager that counts DNS resolutions (MonitorDNS is declared
  // elsewhere in this test class).
  MonitorDNS sm=new MonitorDNS();
  System.setSecurityManager(sm);
  MiniDFSCluster cluster=null;
  try {
    HdfsConfiguration conf=new HdfsConfiguration();
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
    cluster.waitActive();
    // Startup/registration performs some lookups; a non-zero count also
    // proves the monitoring security manager is actually active.
    int initialLookups=sm.lookups;
    assertTrue("dns security manager is active",initialLookups != 0);
    DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager();
    // None of the following operations may trigger further DNS lookups.
    dm.refreshNodes(conf);
    assertEquals(initialLookups,sm.lookups);
    dm.refreshNodes(conf);
    assertEquals(initialLookups,sm.lookups);
    dm.getDatanodeListForReport(DatanodeReportType.ALL);
    assertEquals(initialLookups,sm.lookups);
    dm.getDatanodeListForReport(DatanodeReportType.LIVE);
    assertEquals(initialLookups,sm.lookups);
    dm.getDatanodeListForReport(DatanodeReportType.DEAD);
    assertEquals(initialLookups,sm.lookups);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Always restore the default security manager.
    System.setSecurityManager(null);
  }
}

Class: org.apache.hadoop.hdfs.TestDecommission

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Tests decommission with replicas on the target datanode cannot be migrated * to other datanodes and satisfy the replication factor. Make sure the * datanode won't get stuck in decommissioning state. */ @Test(timeout=360000) public void testDecommission2() throws IOException { LOG.info("Starting test testDecommission"); int numNamenodes=1; int numDatanodes=4; conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,3); startCluster(numNamenodes,numDatanodes,conf); ArrayList> namenodeDecomList=new ArrayList>(numNamenodes); namenodeDecomList.add(0,new ArrayList(numDatanodes)); Path file1=new Path("testDecommission2.dat"); int replicas=4; ArrayList decommissionedNodes=namenodeDecomList.get(0); FileSystem fileSys=cluster.getFileSystem(0); FSNamesystem ns=cluster.getNamesystem(0); writeFile(fileSys,file1,replicas); int deadDecomissioned=ns.getNumDecomDeadDataNodes(); int liveDecomissioned=ns.getNumDecomLiveDataNodes(); DatanodeInfo decomNode=decommissionNode(0,null,decommissionedNodes,AdminStates.DECOMMISSIONED); decommissionedNodes.add(decomNode); assertEquals(deadDecomissioned,ns.getNumDecomDeadDataNodes()); assertEquals(liveDecomissioned + 1,ns.getNumDecomLiveDataNodes()); DFSClient client=getDfsClient(cluster.getNameNode(0),conf); assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length); assertNull(checkFile(fileSys,file1,replicas,decomNode.getXferAddr(),numDatanodes)); cleanupFile(fileSys,file1); cluster.shutdown(); startCluster(1,4,conf); cluster.shutdown(); }

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test using a "registration name" in a host include file.
 * Registration names are DataNode names specified in the configuration by
 * dfs.datanode.hostname. The DataNode will send this name to the NameNode
 * as part of its registration. Registration names are helpful when you
 * want to override the normal first result of DNS resolution on the
 * NameNode. For example, a given datanode IP may map to two hostnames,
 * and you may want to choose which hostname is used internally in the
 * cluster.
 * It is not recommended to use a registration name which is not also a
 * valid DNS hostname for the DataNode. See HDFS-5237 for background.
 */
@Test(timeout=360000) public void testIncludeByRegistrationName() throws IOException, InterruptedException {
  Configuration hdfsConf=new Configuration(conf);
  final String registrationName="127.0.0.100";
  final String nonExistentDn="127.0.0.10";
  hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,registrationName);
  cluster=new MiniDFSCluster.Builder(hdfsConf).numDataNodes(1).checkDataNodeHostConfig(true).setupHostsFile(true).build();
  cluster.waitActive();
  // Put only a non-existent host in the include file: the real DN (known
  // by its registration name) is now excluded and should be marked dead.
  ArrayList<String> nodes=new ArrayList<String>();
  nodes.add(nonExistentDn);
  writeConfigFile(hostsFile,nodes);
  refreshNodes(cluster.getNamesystem(0),hdfsConf);
  DFSClient client=getDfsClient(cluster.getNameNode(0),hdfsConf);
  while (true) {
    DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.DEAD);
    if (info.length == 1) {
      break;
    }
    LOG.info("Waiting for datanode to be marked dead");
    Thread.sleep(HEARTBEAT_INTERVAL * 1000);
  }
  // Include the DN by its registration name; after restart it must come
  // back live under that name, neither decommissioned nor in progress.
  int dnPort=cluster.getDataNodes().get(0).getXferPort();
  nodes=new ArrayList<String>();
  nodes.add(registrationName + ":" + dnPort);
  writeConfigFile(hostsFile,nodes);
  refreshNodes(cluster.getNamesystem(0),hdfsConf);
  cluster.restartDataNode(0);
  while (true) {
    DatanodeInfo info[]=client.datanodeReport(DatanodeReportType.LIVE);
    if (info.length == 1) {
      Assert.assertFalse(info[0].isDecommissioned());
      Assert.assertFalse(info[0].isDecommissionInProgress());
      assertEquals(registrationName,info[0].getHostName());
      break;
    }
    LOG.info("Waiting for datanode to come back");
    Thread.sleep(HEARTBEAT_INTERVAL * 1000);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests restart of namenode while datanode hosts are added to exclude file:
 * the excluded node must end up DECOMMISSIONED after the restart and its
 * block must be re-replicated to the remaining node.
 */
@Test(timeout=360000) public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithNamenodeRestart");
  int numNamenodes=1;
  int numDatanodes=1;
  int replicas=1;
  startCluster(numNamenodes,numDatanodes,conf);
  Path file1=new Path("testDecommission.dat");
  FileSystem fileSys=cluster.getFileSystem();
  writeFile(fileSys,file1,replicas);
  DFSClient client=getDfsClient(cluster.getNameNode(),conf);
  DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
  DatanodeID excludedDatanodeID=info[0];
  String excludedDatanodeName=info[0].getXferAddr();
  // Exclude the only current datanode, then add a second one to take over
  // its replica.
  writeConfigFile(excludeFile,new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
  cluster.startDataNodes(conf,1,true,null,null,null,null);
  numDatanodes+=1;
  assertEquals("Number of datanodes should be 2 ",2,cluster.getDataNodes().size());
  // Restart the NN with the exclude file in place; decommissioning must
  // proceed from the restarted namenode's state.
  cluster.restartNameNode();
  DatanodeInfo datanodeInfo=NameNodeAdapter.getDatanode(cluster.getNamesystem(),excludedDatanodeID);
  waitNodeState(datanodeInfo,AdminStates.DECOMMISSIONED);
  assertEquals("All datanodes must be alive",numDatanodes,client.datanodeReport(DatanodeReportType.LIVE).length);
  // Poll (up to 20s) until the block no longer resides on the excluded node.
  int tries=0;
  while (tries++ < 20) {
    try {
      Thread.sleep(1000);
      if (checkFile(fileSys,file1,replicas,datanodeInfo.getXferAddr(),numDatanodes) == null) {
        break;
      }
    } catch (InterruptedException ie) {
    }
  }
  assertTrue("Checked if block was replicated after decommission, tried " + tries + " times.",tries < 20);
  cleanupFile(fileSys,file1);
  cluster.shutdown();
  // Make sure a fresh cluster still starts cleanly afterwards.
  startCluster(numNamenodes,numDatanodes,conf);
  cluster.shutdown();
}

Class: org.apache.hadoop.hdfs.TestDefaultNameNodePort

EqualityVerifier 
/**
 * Checks that NameNode.getAddress(Configuration) derives the port from the
 * default URI: default port when none is given, explicit port otherwise.
 */
@Test public void testGetAddressFromConf() throws Exception {
  Configuration conf=new HdfsConfiguration();
  // JUnit convention: expected value first, actual second.
  FileSystem.setDefaultUri(conf,"hdfs://foo/");
  assertEquals(NameNode.DEFAULT_PORT,NameNode.getAddress(conf).getPort());
  FileSystem.setDefaultUri(conf,"hdfs://foo:555/");
  assertEquals(555,NameNode.getAddress(conf).getPort());
  // A bare authority without scheme/port also falls back to the default.
  FileSystem.setDefaultUri(conf,"foo");
  assertEquals(NameNode.DEFAULT_PORT,NameNode.getAddress(conf).getPort());
}

EqualityVerifier 
/**
 * Checks that NameNode.getUri omits the port from the URI exactly when the
 * address uses the default port.
 */
@Test public void testGetUri(){
  // JUnit convention: expected value first, actual second.
  assertEquals(URI.create("hdfs://foo:555"),NameNode.getUri(new InetSocketAddress("foo",555)));
  assertEquals(URI.create("hdfs://foo"),NameNode.getUri(new InetSocketAddress("foo",NameNode.DEFAULT_PORT)));
}

EqualityVerifier 
/**
 * Checks that NameNode.getAddress(String) parses the port from various
 * address forms, defaulting when no port is present.
 */
@Test public void testGetAddressFromString() throws Exception {
  // JUnit convention: expected value first, actual second.
  assertEquals(NameNode.DEFAULT_PORT,NameNode.getAddress("foo").getPort());
  assertEquals(NameNode.DEFAULT_PORT,NameNode.getAddress("hdfs://foo/").getPort());
  assertEquals(555,NameNode.getAddress("hdfs://foo:555").getPort());
  assertEquals(555,NameNode.getAddress("foo:555").getPort());
}

Class: org.apache.hadoop.hdfs.TestDisableConnCache

InternalCallVerifier EqualityVerifier 
/**
 * Test that the socket cache can be disabled by setting the capacity to
 * 0. Regression test for HDFS-3365.
 * @throws Exception
 */
@Test public void testDisableCache() throws Exception {
  HdfsConfiguration confWithoutCache=new HdfsConfiguration();
  // Capacity 0 disables the peer cache entirely.
  confWithoutCache.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,0);
  BlockReaderTestUtil util=new BlockReaderTestUtil(1,confWithoutCache);
  final Path testFile=new Path("/testConnCache.dat");
  util.writeFile(testFile,FILE_SIZE / 1024);
  FileSystem fsWithoutCache=FileSystem.newInstance(util.getConf());
  try {
    DFSTestUtil.readFile(fsWithoutCache,testFile);
    // After a read, nothing may have been cached.
    assertEquals(0,((DistributedFileSystem)fsWithoutCache).dfs.getClientContext().getPeerCache().size());
  } finally {
    fsWithoutCache.close();
    util.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestDistributedFileSystem

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises getFileChecksum() over both HDFS and WebHDFS: error paths
 * (missing file, directory), checksum/hashCode equality for identical
 * content via every access path, the fixed checksum of a zero-byte file,
 * and permission failures for an unprivileged WebHDFS user.
 */
@Test public void testFileChecksum() throws Exception {
  final long seed=RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);
  final Configuration conf=getTestConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true);
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final FileSystem hdfs=cluster.getFileSystem();
  final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final UserGroupInformation current=UserGroupInformation.getCurrentUser();
  final UserGroupInformation ugi=UserGroupInformation.createUserForTesting(current.getShortUserName() + "x",new String[]{"user"});
  // getFileChecksum on a non-existent path must fail.
  try {
    hdfs.getFileChecksum(new Path("/test/TestNonExistingFile"));
    fail("Expecting FileNotFoundException");
  } catch (FileNotFoundException e) {
    assertTrue("Not throwing the intended exception message",e.getMessage().contains("File does not exist: /test/TestNonExistingFile"));
  }
  // getFileChecksum on a directory must fail as well.
  try {
    Path path=new Path("/test/TestExistingDir/");
    hdfs.mkdirs(path);
    hdfs.getFileChecksum(path);
    fail("Expecting FileNotFoundException");
  } catch (FileNotFoundException e) {
    assertTrue("Not throwing the intended exception message",e.getMessage().contains("Path is not a file: /test/TestExistingDir"));
  }
  final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr;
  System.out.println("webhdfsuri=" + webhdfsuri);
  // Access WebHDFS as a different (unprivileged) user.
  final FileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction<FileSystem>(){
    @Override public FileSystem run() throws Exception {
      return new Path(webhdfsuri).getFileSystem(conf);
    }
  });
  final Path dir=new Path("/filechecksum");
  final int block_size=1024;
  final int buffer_size=conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,512);
  for (int n=0; n < 5; n++) {
    // Random payload whose size grows by roughly one block per iteration.
    final byte[] data=new byte[RAN.nextInt(block_size / 2 - 1) + n * block_size + 1];
    RAN.nextBytes(data);
    System.out.println("data.length=" + data.length);
    final Path foo=new Path(dir,"foo" + n);
    {
      final FSDataOutputStream out=hdfs.create(foo,false,buffer_size,(short)2,block_size);
      out.write(data);
      out.close();
    }
    final FileChecksum hdfsfoocs=hdfs.getFileChecksum(foo);
    System.out.println("hdfsfoocs=" + hdfsfoocs);
    final FileChecksum webhdfsfoocs=webhdfs.getFileChecksum(foo);
    System.out.println("webhdfsfoocs=" + webhdfsfoocs);
    final Path webhdfsqualified=new Path(webhdfsuri + dir,"foo" + n);
    final FileChecksum webhdfs_qfoocs=webhdfs.getFileChecksum(webhdfsqualified);
    System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
    // A zero-byte file has a well-known, fixed checksum.
    final Path zeroByteFile=new Path(dir,"zeroByteFile" + n);
    {
      final FSDataOutputStream out=hdfs.create(zeroByteFile,false,buffer_size,(short)2,block_size);
      out.close();
    }
    {
      final FileChecksum zeroChecksum=hdfs.getFileChecksum(zeroByteFile);
      assertEquals(zeroChecksum.toString(),"MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51");
    }
    // A second file with identical content must produce an equal checksum
    // (and hash code) through HDFS, WebHDFS, and a qualified WebHDFS path.
    final Path bar=new Path(dir,"bar" + n);
    {
      final FSDataOutputStream out=hdfs.create(bar,false,buffer_size,(short)2,block_size);
      out.write(data);
      out.close();
    }
    {
      final FileChecksum barcs=hdfs.getFileChecksum(bar);
      final int barhashcode=barcs.hashCode();
      assertEquals(hdfsfoocs.hashCode(),barhashcode);
      assertEquals(hdfsfoocs,barcs);
      assertEquals(webhdfsfoocs.hashCode(),barhashcode);
      assertEquals(webhdfsfoocs,barcs);
      assertEquals(webhdfs_qfoocs.hashCode(),barhashcode);
      assertEquals(webhdfs_qfoocs,barcs);
    }
    // With all permissions revoked the unprivileged WebHDFS user must get
    // an IOException; then restore permissions for the next iteration.
    hdfs.setPermission(dir,new FsPermission((short)0));
    {
      try {
        webhdfs.getFileChecksum(webhdfsqualified);
        fail();
      } catch (IOException ioe) {
        FileSystem.LOG.info("GOOD: getting an exception",ioe);
      }
    }
    hdfs.setPermission(dir,new FsPermission((short)0777));
  }
  cluster.shutdown();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates two files with different per-file checksum types (CRC32C vs
 * CRC32) and verifies getFileChecksum() reflects the requested type and
 * yields different checksums for identical content.
 */
@Test public void testCreateWithCustomChecksum() throws Exception {
  Configuration conf=getTestConfiguration();
  MiniDFSCluster cluster=null;
  Path testBasePath=new Path("/test/csum");
  Path path1=new Path(testBasePath,"file_wtih_crc1");
  Path path2=new Path(testBasePath,"file_with_crc2");
  ChecksumOpt opt1=new ChecksumOpt(DataChecksum.Type.CRC32C,512);
  ChecksumOpt opt2=new ChecksumOpt(DataChecksum.Type.CRC32,512);
  FsPermission perm=FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf));
  EnumSet<CreateFlag> flags=EnumSet.of(CreateFlag.OVERWRITE,CreateFlag.CREATE);
  short repl=1;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem dfs=cluster.getFileSystem();
    dfs.mkdirs(testBasePath);
    // Identical content, different checksum algorithms.
    FSDataOutputStream out1=dfs.create(path1,perm,flags,4096,repl,131072L,null,opt1);
    FSDataOutputStream out2=dfs.create(path2,perm,flags,4096,repl,131072L,null,opt2);
    for (int i=0; i < 1024; i++) {
      out1.write(i);
      out2.write(i);
    }
    out1.close();
    out2.close();
    MD5MD5CRC32FileChecksum sum1=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1);
    MD5MD5CRC32FileChecksum sum2=(MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2);
    // Same data but different CRC polynomials: checksums must differ.
    assertFalse(sum1.equals(sum2));
    assertEquals(DataChecksum.Type.CRC32C,sum1.getCrcType());
    assertEquals(DataChecksum.Type.CRC32,sum2.getCrcType());
  } finally {
    if (cluster != null) {
      cluster.getFileSystem().delete(testBasePath,true);
      cluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests error paths for
 * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}:
 * datanode-side RPC timeouts, a stopped datanode, and deleted replicas.
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsError() throws Exception {
  final Configuration conf=getTestConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
  // Short metadata RPC timeout and no connect retries so injected delays
  // reliably surface as timeouts.
  conf.setInt(DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,1500);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.getDataNodes();
    final DistributedFileSystem fs=cluster.getFileSystem();
    final Path tmpFile1=new Path("/errorfile1.dat");
    final Path tmpFile2=new Path("/errorfile2.dat");
    DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADL);
    DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADL);
    // Wait until both files report two replicas each (4 hosts total).
    GenericTestUtils.waitFor(new Supplier<Boolean>(){
      @Override public Boolean get(){
        try {
          List<BlockLocation> list=Lists.newArrayList();
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
          int totalRepl=0;
          for (BlockLocation loc : list) {
            totalRepl+=loc.getHosts().length;
          }
          if (totalRepl == 4) {
            return true;
          }
        } catch (IOException e) {
          // Keep polling; waitFor enforces the overall timeout.
        }
        return false;
      }
    },500,30000);
    BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
    BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
    List<BlockLocation> allLocs=Lists.newArrayList();
    allLocs.addAll(Arrays.asList(blockLocs1));
    allLocs.addAll(Arrays.asList(blockLocs2));
    // Inject a 3s delay into the DN-side metadata RPC — longer than the
    // 1.5s client timeout — so no cached hosts can be returned.
    DataNodeFaultInjector injector=Mockito.mock(DataNodeFaultInjector.class);
    Mockito.doAnswer(new Answer<Void>(){
      @Override public Void answer(InvocationOnMock invocation) throws Throwable {
        Thread.sleep(3000);
        return null;
      }
    }).when(injector).getHdfsBlocksMetadata();
    DataNodeFaultInjector.instance=injector;
    BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(allLocs);
    for (BlockStorageLocation loc : locs) {
      assertEquals("Found more than 0 cached hosts although RPCs supposedly timed out",0,loc.getCachedHosts().length);
    }
    // Remove the injector, then stop one datanode: each block should have
    // exactly one resolvable and one unresolvable volume.
    DataNodeFaultInjector.instance=new DataNodeFaultInjector();
    DataNodeProperties stoppedNode=cluster.stopDataNode(0);
    locs=fs.getFileBlockStorageLocations(allLocs);
    assertEquals("Expected two HdfsBlockLocation for two 1-block files",2,locs.length);
    for (BlockStorageLocation l : locs) {
      assertEquals("Expected two replicas for each block",2,l.getHosts().length);
      assertEquals("Expected two VolumeIDs for each block",2,l.getVolumeIds().length);
      assertTrue("Expected one valid and one invalid volume",(l.getVolumeIds()[0] == null) ^ (l.getVolumeIds()[1] == null));
    }
    // Restart the datanode, delete one file and wait until its replicas
    // are removed from the datanodes; its volume IDs must then be null.
    cluster.restartDataNode(stoppedNode,true);
    cluster.waitActive();
    fs.delete(tmpFile2,true);
    HATestUtil.waitForNNToIssueDeletions(cluster.getNameNode());
    cluster.triggerHeartbeats();
    HATestUtil.waitForDNDeletions(cluster);
    locs=fs.getFileBlockStorageLocations(allLocs);
    assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
    assertNotNull(locs[0].getVolumeIds()[0]);
    assertNotNull(locs[0].getVolumeIds()[1]);
    assertNull(locs[1].getVolumeIds()[0]);
    assertNull(locs[1].getVolumeIds()[1]);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises the client-side LeaseRenewer lifecycle: the renewer thread must
// run exactly while files are open for write (plus a grace period after the
// last close), reads must never start it, opening a missing file must throw
// FileNotFoundException, and an IP-based URI round-trip must work.
@Test public void testDFSClient() throws Exception {
  Configuration conf=getTestConfiguration();
  final long grace=1000L;
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final String filepathstring="/test/LeaseChecker/foo";
    final Path[] filepaths=new Path[4];
    for (int i=0; i < filepaths.length; i++) {
      filepaths[i]=new Path(filepathstring + i);
    }
    final long millis=Time.now();
    {
      final DistributedFileSystem dfs=cluster.getFileSystem();
      dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      {
        // Single writer: the renewer runs while the stream is open and for
        // the grace period after close, then stops.
        final FSDataOutputStream out=dfs.create(filepaths[0]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out.close();
        Thread.sleep(grace / 4 * 3);
        // Still within the grace period: renewer keeps running.
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        for (int i=0; i < 3; i++) {
          if (dfs.dfs.getLeaseRenewer().isRunning()) {
            Thread.sleep(grace / 2);
          }
        }
        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      }
      {
        // Two overlapping writers: the renewer keeps running until the last
        // stream is closed (plus grace).
        final FSDataOutputStream out1=dfs.create(filepaths[1]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        final FSDataOutputStream out2=dfs.create(filepaths[2]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out1.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out1.close();
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out2.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out2.close();
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
      }
      {
        // Writer kept open across sleeps: the renewer must stay alive the
        // whole time and wind down only after close + grace.
        final FSDataOutputStream out3=dfs.create(filepaths[3]);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out3.writeLong(millis);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        out3.close();
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        Thread.sleep(grace / 4 * 3);
        assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
        for (int i=0; i < 3; i++) {
          if (dfs.dfs.getLeaseRenewer().isRunning()) {
            Thread.sleep(grace / 2);
          }
        }
        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      }
      dfs.close();
    }
    {
      // Opening a non-existent path must throw FileNotFoundException; the
      // finally-block assertTrue(false) fails the test if open() succeeded.
      FileSystem fs=cluster.getFileSystem();
      Path dir=new Path("/wrwelkj");
      assertFalse("File should not exist for test.",fs.exists(dir));
      try {
        FSDataInputStream in=fs.open(dir);
        try {
          in.close();
          fs.close();
        } finally {
          assertTrue("Did not get a FileNotFoundException for non-existing" + " file.",false);
        }
      } catch (FileNotFoundException fnf) {
        // Expected path.
      }
    }
    {
      // Reading must not start the lease renewer at any point.
      final DistributedFileSystem dfs=cluster.getFileSystem();
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      FSDataInputStream in=dfs.open(filepaths[0]);
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      assertEquals(millis,in.readLong());
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      in.close();
      assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
      dfs.close();
    }
    {
      // Round-trip through a filesystem obtained from an IP-based URI.
      String uri="hdfs://127.0.0.1:" + cluster.getNameNodePort() + "/test/ipAddress/file";
      Path path=new Path(uri);
      FileSystem fs=FileSystem.get(path.toUri(),conf);
      FSDataOutputStream out=fs.create(path);
      byte[] buf=new byte[1024];
      out.write(buf);
      out.close();
      FSDataInputStream in=fs.open(path);
      in.readFully(buf);
      in.close();
      fs.close();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the normal path of batching up BlockLocation[]s to be passed to a
 * single
 * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
 * call.
 */
@Test(timeout=60000) public void testGetFileBlockStorageLocationsBatching() throws Exception {
  final Configuration conf=getTestConfiguration();
  // Verbose tracing for the RPC and storage-location machinery under test.
  ((Log4JLogger)ProtobufRpcEngine.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger)BlockStorageLocationUtil.LOG).getLogger().setLevel(Level.TRACE);
  ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.TRACE);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,true);
  final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    final DistributedFileSystem fs=cluster.getFileSystem();
    final Path tmpFile1=new Path("/tmpfile1.dat");
    final Path tmpFile2=new Path("/tmpfile2.dat");
    DFSTestUtil.createFile(fs,tmpFile1,1024,(short)2,0xDEADDEADL);
    DFSTestUtil.createFile(fs,tmpFile2,1024,(short)2,0xDEADDEADL);
    // Wait until both files report two replicas each (4 hosts total).
    GenericTestUtils.waitFor(new Supplier<Boolean>(){
      @Override public Boolean get(){
        try {
          List<BlockLocation> list=Lists.newArrayList();
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile1,0,1024)));
          list.addAll(Arrays.asList(fs.getFileBlockLocations(tmpFile2,0,1024)));
          int totalRepl=0;
          for (BlockLocation loc : list) {
            totalRepl+=loc.getHosts().length;
          }
          if (totalRepl == 4) {
            return true;
          }
        } catch (IOException e) {
          // Keep polling; waitFor enforces the overall timeout.
        }
        return false;
      }
    },500,30000);
    // Batch both files' locations into a single storage-locations call.
    BlockLocation[] blockLocs1=fs.getFileBlockLocations(tmpFile1,0,1024);
    BlockLocation[] blockLocs2=fs.getFileBlockLocations(tmpFile2,0,1024);
    BlockLocation[] blockLocs=(BlockLocation[])ArrayUtils.addAll(blockLocs1,blockLocs2);
    BlockStorageLocation[] locs=fs.getFileBlockStorageLocations(Arrays.asList(blockLocs));
    // Log which volume each replica landed on (diagnostics only).
    int counter=0;
    for (BlockStorageLocation l : locs) {
      for (int i=0; i < l.getVolumeIds().length; i++) {
        VolumeId id=l.getVolumeIds()[i];
        String name=l.getNames()[i];
        if (id != null) {
          System.out.println("Datanode " + name + " has block "+ counter+ " on volume id "+ id.toString());
        }
      }
      counter++;
    }
    assertEquals("Expected two HdfsBlockLocations for two 1-block files",2,locs.length);
    // Every replica of every block must resolve to a valid volume ID.
    for (BlockStorageLocation l : locs) {
      assertEquals("Expected two replicas for each block",2,l.getVolumeIds().length);
      for (int i=0; i < l.getVolumeIds().length; i++) {
        VolumeId id=l.getVolumeIds()[i];
        String name=l.getNames()[i];
        assertTrue("Expected block to be valid on datanode " + name,id != null);
      }
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestEncryptedTransfer

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies that a client that outlives the block-token / encryption-key
// lifetime can still read once the keys have expired and been refreshed.
@Test public void testLongLivedClient() throws IOException, InterruptedException {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new Configuration();
    cluster=new MiniDFSCluster.Builder(conf).build();
    FileSystem fs=getFileSystem(conf);
    // Write the test data on an unencrypted cluster first.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    FileChecksum checksum=fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();
    // Bring the same storage back up with encrypted transfer enabled.
    setEncryptionConfigKeys(conf);
    cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
    // Shrink key update interval and token lifetime so keys expire quickly.
    BlockTokenSecretManager btsm=cluster.getNamesystem().getBlockManager().getBlockTokenSecretManager();
    btsm.setKeyUpdateIntervalForTesting(2 * 1000);
    btsm.setTokenLifetime(2 * 1000);
    btsm.clearAllKeysForTesting();
    fs=getFileSystem(conf);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
    LOG.info("Sleeping so that encryption keys expire...");
    Thread.sleep(15 * 1000);
    LOG.info("Done sleeping.");
    // The same client must still read correctly after key expiry.
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Verifies that an existing client filesystem handle keeps reading correctly
// over encrypted transfer even after both namenode and datanode restart.
@Test public void testLongLivedReadClientAfterRestart() throws IOException {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new Configuration();
    cluster=new MiniDFSCluster.Builder(conf).build();
    FileSystem fs=getFileSystem(conf);
    // Write the test data on an unencrypted cluster first.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    FileChecksum checksum=fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();
    // Bring the same storage back up with encrypted transfer enabled.
    setEncryptionConfigKeys(conf);
    cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
    fs=getFileSystem(conf);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
    // Restart NN and DN; the long-lived client handle must still work.
    cluster.restartNameNode();
    assertTrue(cluster.restartDataNode(0));
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Writes data on an unencrypted cluster, restarts it with encrypted data
// transfer enabled, and verifies the data and checksum read back unchanged.
@Test public void testEncryptedRead() throws IOException {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new Configuration();
    cluster=new MiniDFSCluster.Builder(conf).build();
    FileSystem fs=getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    FileChecksum checksum=fs.getFileChecksum(TEST_PATH);
    fs.close();
    cluster.shutdown();
    // Re-open the same storage with encryption keys configured.
    setEncryptionConfigKeys(conf);
    cluster=new MiniDFSCluster.Builder(conf).manageDataDfsDirs(false).manageNameDfsDirs(false).format(false).startupOption(StartupOption.REGULAR).build();
    fs=getFileSystem(conf);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    assertEquals(checksum,fs.getFileChecksum(TEST_PATH));
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests that an append which requires transferring the block to a
 * replacement datanode (because one replica holder is down) works over
 * encrypted data transfer.
 */
@Test public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new Configuration();
    setEncryptionConfigKeys(conf);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    FileSystem fs=getFileSystem(conf);
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    // The file must consist of a single block with three replicas.
    FSDataInputStream in=fs.open(TEST_PATH);
    List<LocatedBlock> locatedBlocks=DFSTestUtil.getAllBlocks(in);
    in.close();
    assertEquals(1,locatedBlocks.size());
    assertEquals(3,locatedBlocks.get(0).getLocations().length);
    // Kill one replica holder so the subsequent append has to rebuild the
    // pipeline, including an encrypted block transfer to the fourth node.
    DataNode dn=cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
    dn.shutdown();
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT,DFSTestUtil.readFile(fs,TEST_PATH));
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Like the plain encrypted-read test, but additionally restarts the
 * NameNode after the first encrypted read and re-verifies contents and
 * checksum through a freshly obtained client.
 */
@Test public void testEncryptedReadAfterNameNodeRestart() throws IOException {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    // Write the data unencrypted and remember its checksum.
    dfsCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    FileChecksum expectedChecksum = fileSys.getFileChecksum(TEST_PATH);
    fileSys.close();
    dfsCluster.shutdown();

    // Come back up on the same dirs with encryption keys configured.
    setEncryptionConfigKeys(config);
    dfsCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(expectedChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();

    // Restart the NameNode and check again with a new client handle.
    dfsCluster.restartNameNode();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(expectedChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a long-lived write client on an encrypted cluster can
 * still append after the NameNode and all DataNodes have been restarted.
 */
@Test public void testLongLivedWriteClientAfterRestart() throws IOException {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    setEncryptionConfigKeys(config);
    dfsCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);

    // Initial encrypted write and read-back.
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));

    // Bounce the whole cluster while the client stays open.
    dfsCluster.restartNameNode();
    assertTrue(dfsCluster.restartDataNodes());
    dfsCluster.waitActive();

    // The same client must still be able to append and read.
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    fileSys.close();
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Same flow as the basic encrypted-read test, but the second cluster is
 * configured to use the "rc4" data-encryption algorithm.
 */
@Test public void testEncryptedReadWithRC4() throws IOException {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    // Unencrypted write; capture the expected checksum.
    dfsCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    FileChecksum expectedChecksum = fileSys.getFileChecksum(TEST_PATH);
    fileSys.close();
    dfsCluster.shutdown();

    // Restart with encryption on and RC4 selected as the cipher.
    setEncryptionConfigKeys(config);
    config.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "rc4");
    dfsCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    assertEquals(expectedChecksum, fileSys.getFileChecksum(TEST_PATH));
    fileSys.close();
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a client which refuses to encrypt (shouldEncryptData()
 * stubbed to false) cannot read from an encryption-enabled cluster, and
 * that the DataNode logs a failed encryption handshake. When a trusted
 * channel resolver is in use (resolverClazz ends with
 * "TestTrustedChannelResolver"), the unencrypted read is expected to
 * succeed instead.
 */
@Test public void testClientThatDoesNotSupportEncryption() throws IOException {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    // Shrink the client retry window so the expected failure is quick.
    config.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    dfsCluster = new MiniDFSCluster.Builder(config).build();
    FileSystem fileSys = getFileSystem(config);
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    fileSys.close();
    dfsCluster.shutdown();

    // Restart the same storage with encryption required.
    setEncryptionConfigKeys(config);
    dfsCluster = new MiniDFSCluster.Builder(config)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .format(false)
        .startupOption(StartupOption.REGULAR)
        .build();
    fileSys = getFileSystem(config);

    // Swap in a spy client that claims encryption is not needed.
    DFSClient realClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fileSys);
    DFSClient nonEncryptingClient = Mockito.spy(realClient);
    Mockito.doReturn(false).when(nonEncryptingClient).shouldEncryptData();
    DFSClientAdapter.setDFSClient((DistributedFileSystem) fileSys, nonEncryptingClient);

    LogCapturer dnLogs =
        GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(DataNode.class));
    try {
      assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
      if (resolverClazz != null
          && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
        fail("Should not have been able to read without encryption enabled.");
      }
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("Could not obtain block:", ioe);
    } finally {
      dnLogs.stopCapturing();
    }
    fileSys.close();

    if (resolverClazz != null
        && !resolverClazz.endsWith("TestTrustedChannelResolver")) {
      // The DataNode should have rejected the plain-text handshake.
      GenericTestUtils.assertMatches(dnLogs.getOutput(),
          "Failed to read expected encryption handshake from client at");
    }
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}

APIUtilityVerifier EqualityVerifier 
/**
 * Writes then appends to a file on an encryption-enabled cluster and
 * verifies the doubled contents read back correctly.
 */
@Test public void testEncryptedAppend() throws IOException {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    setEncryptionConfigKeys(config);
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3).build();
    FileSystem fileSys = getFileSystem(config);

    // First write, then append the same data; both over encrypted transfer.
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    writeTestDataToFile(fileSys);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fileSys, TEST_PATH));
    fileSys.close();
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestEncryptionZones

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test getEncryptionZoneForPath as a non super user. */ @Test(timeout=60000) public void testGetEZAsNonSuperUser() throws Exception { final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); final Path testRoot=new Path(fsHelper.getTestRootDir()); final Path superPath=new Path(testRoot,"superuseronly"); final Path superPathFile=new Path(superPath,"file1"); final Path allPath=new Path(testRoot,"accessall"); final Path allPathFile=new Path(allPath,"file1"); final Path nonEZDir=new Path(testRoot,"nonEZDir"); final Path nonEZFile=new Path(nonEZDir,"file1"); final int len=8192; fsWrapper.mkdir(testRoot,new FsPermission((short)0777),true); fsWrapper.mkdir(superPath,new FsPermission((short)0700),false); fsWrapper.mkdir(allPath,new FsPermission((short)0777),false); fsWrapper.mkdir(nonEZDir,new FsPermission((short)0777),false); dfsAdmin.createEncryptionZone(superPath,TEST_KEY); dfsAdmin.createEncryptionZone(allPath,TEST_KEY); dfsAdmin.allowSnapshot(new Path("/")); final Path newSnap=fs.createSnapshot(new Path("/")); DFSTestUtil.createFile(fs,superPathFile,len,(short)1,0xFEED); DFSTestUtil.createFile(fs,allPathFile,len,(short)1,0xFEED); DFSTestUtil.createFile(fs,nonEZFile,len,(short)1,0xFEED); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final HdfsAdmin userAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf); try { userAdmin.getEncryptionZoneForPath(null); fail("should have thrown NPE"); } catch ( NullPointerException e) { } assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPath).getPath().toString()); assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(allPathFile).getPath().toString()); try { userAdmin.getEncryptionZoneForPath(superPathFile); fail("expected AccessControlException"); } catch ( AccessControlException e) { assertExceptionContains("Permission denied:",e); } assertNull("expected null 
for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZDir)); assertNull("expected null for non-ez path",userAdmin.getEncryptionZoneForPath(nonEZFile)); String snapshottedAllPath=newSnap.toString() + allPath.toString(); assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString()); fs.delete(allPathFile,false); assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString()); fs.delete(allPath,true); assertEquals("expected ez path",allPath.toString(),userAdmin.getEncryptionZoneForPath(new Path(snapshottedAllPath)).getPath().toString()); assertNull("expected null for deleted file path",userAdmin.getEncryptionZoneForPath(allPathFile)); assertNull("expected null for deleted directory path",userAdmin.getEncryptionZoneForPath(allPath)); return null; } } ); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a file through an encryption zone: contents must match an
 * unencrypted baseline file, stay readable after the zone key is rolled,
 * and a file created after the roll must carry a different EDEK and a
 * different EZ key version than one created before.
 */
@Test(timeout=120000) public void testReadWrite() throws Exception {
  final HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);

  // Baseline file outside any encryption zone.
  final Path baseFile = new Path("/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);

  // Create a zone and an identical file inside it.
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  hdfsAdmin.createEncryptionZone(zone, TEST_KEY);
  final Path encFile1 = new Path(zone, "myfile");
  DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
  verifyFilesEqual(fs, baseFile, encFile1, len);
  assertNumZones(1);

  // Roll the zone's key; the existing file must still decrypt correctly.
  String keyName = hdfsAdmin.listEncryptionZones().next().getKeyName();
  cluster.getNamesystem().getProvider().rollNewVersion(keyName);
  verifyFilesEqual(fs, baseFile, encFile1, len);

  // A post-roll file gets a fresh EDEK and a new key version.
  final Path encFile2 = new Path(zone, "myfile2");
  DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
  FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
  FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
  assertFalse("EDEKs should be different",
      Arrays.equals(feInfo1.getEncryptedDataEncryptionKey(),
          feInfo2.getEncryptedDataEncryptionKey()));
  assertNotEquals("Key was rolled, versions should be different",
      feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
  verifyFilesEqual(fs, encFile1, encFile2, len);
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises cipher-suite negotiation for files created in an encryption
 * zone: creation fails with UnknownCipherSuiteException when the client
 * advertises no suites or only UNKNOWN suites, and succeeds when
 * AES_CTR_NOPADDING appears anywhere in the list. Also checks the NN
 * created exactly one key (one version) and that created files report
 * AES_CTR_NOPADDING.
 *
 * Fixes: restored the type arguments on {@code keys}, {@code allVersions}
 * and {@code versions} (the raw {@code List} made the enhanced-for over
 * {@code String key} uncompilable, and {@code versions}'s declaration had
 * been split mid-token); put assertEquals arguments in the conventional
 * (expected, actual) order in the final loop.
 */
@Test(timeout=60000) public void testCipherSuiteNegotiation() throws Exception {
  final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final Path zone = new Path("/zone");
  fs.mkdirs(zone);
  dfsAdmin.createEncryptionZone(zone, TEST_KEY);
  DFSTestUtil.createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED);

  // No advertised suites: creation must fail.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(0);
  try {
    DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
    fail("Created a file without specifying a CipherSuite!");
  } catch (UnknownCipherSuiteException e) {
    assertExceptionContains("No cipher suites", e);
  }

  // Only UNKNOWN suites: creation must still fail.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  try {
    DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
    fail("Created a file without specifying a CipherSuite!");
  } catch (UnknownCipherSuiteException e) {
    assertExceptionContains("No cipher suites", e);
  }

  // A supported suite anywhere in the list is enough.
  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  DFSTestUtil.createFile(fs, new Path(zone, "success2"), 0, (short) 1, 0xFEED);

  fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
  fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
  DFSTestUtil.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED);

  // The NameNode should have created exactly one key with one version.
  cluster.getNamesystem().getProvider().flush();
  KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
  List<String> keys = provider.getKeys();
  assertEquals("Expected NN to have created one key per zone", 1, keys.size());
  List<KeyVersion> allVersions = Lists.newArrayList();
  for (String key : keys) {
    List<KeyVersion> versions = provider.getKeyVersions(key);
    assertEquals("Should only have one key version per key", 1, versions.size());
    allVersions.addAll(versions);
  }

  // Successfully created files must report the negotiated suite.
  for (int i = 2; i <= 3; i++) {
    FileEncryptionInfo feInfo =
        getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
    assertEquals(CipherSuite.AES_CTR_NOPADDING, feInfo.getCipherSuite());
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the retry logic in startFile. We release the lock while generating
 * an EDEK, so tricky things can happen in the intervening time.
 *
 * Scenarios: (1) a zone is created mid-create → one retry expected;
 * (2) the zone is deleted mid-create → no retry expected; (3) the zone is
 * replaced with a different key mid-create → one retry expected; (4) the
 * zone's key is flipped on every attempt → the create must eventually give
 * up with "Too many retries".
 *
 * Fixes: the single-thread ExecutorService was never shut down (leaked
 * its worker thread); declared the raw {@code Future} as {@code Future<?>}.
 */
@Test(timeout=120000) public void testStartFileRetry() throws Exception {
  final Path zone1 = new Path("/zone1");
  final Path file = new Path(zone1, "file1");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  ExecutorService executor = Executors.newSingleThreadExecutor();
  try {
    // Scenario 1: zone created while the create is in flight → retry.
    executor.submit(new InjectFaultTask() {
      @Override
      public void doFault() throws Exception {
        dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
      }
      @Override
      public void doCleanup() throws Exception {
        assertEquals("Expected a startFile retry", 2, injector.generateCount);
        fsWrapper.delete(file, false);
      }
    }).get();

    // Scenario 2: zone deleted while the create is in flight → no retry.
    executor.submit(new InjectFaultTask() {
      @Override
      public void doFault() throws Exception {
        fsWrapper.delete(zone1, true);
      }
      @Override
      public void doCleanup() throws Exception {
        assertEquals("Expected no startFile retries", 1, injector.generateCount);
        fsWrapper.delete(file, false);
      }
    }).get();

    // Scenario 3: zone replaced with a different key → retry.
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    final String otherKey = "otherKey";
    DFSTestUtil.createKey(otherKey, cluster, conf);
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
    executor.submit(new InjectFaultTask() {
      @Override
      public void doFault() throws Exception {
        fsWrapper.delete(zone1, true);
        fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
        dfsAdmin.createEncryptionZone(zone1, otherKey);
      }
      @Override
      public void doCleanup() throws Exception {
        assertEquals("Expected a startFile retry", 2, injector.generateCount);
        fsWrapper.delete(zone1, true);
      }
    }).get();

    // Scenario 4: flip the zone key on every attempt so the create can
    // never catch up, and verify it fails after too many retries.
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    final String anotherKey = "anotherKey";
    DFSTestUtil.createKey(anotherKey, cluster, conf);
    dfsAdmin.createEncryptionZone(zone1, anotherKey);
    String keyToUse = otherKey;
    MyInjector injector = new MyInjector();
    EncryptionFaultInjector.instance = injector;
    Future<?> future = executor.submit(new CreateFileTask(fsWrapper, file));
    for (int i = 0; i < 10; i++) {
      injector.ready.await();
      fsWrapper.delete(zone1, true);
      fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
      dfsAdmin.createEncryptionZone(zone1, keyToUse);
      // Intentional reference comparison: keyToUse only ever holds the
      // otherKey / anotherKey references assigned above.
      if (keyToUse == otherKey) {
        keyToUse = anotherKey;
      } else {
        keyToUse = otherKey;
      }
      injector.wait.countDown();
      injector = new MyInjector();
      EncryptionFaultInjector.instance = injector;
    }
    try {
      future.get();
      fail("Expected exception from too many retries");
    } catch (ExecutionException e) {
      assertExceptionContains(
          "Too many retries because of encryption zone operations", e.getCause());
    }
  } finally {
    // Fix: release the executor's worker thread.
    executor.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestFileAppend

InternalCallVerifier EqualityVerifier 
/**
 * Tests appending after soft-limit expires: a second client is allowed to
 * append to a file whose original writer's lease soft limit (1 ms here)
 * has elapsed. The first stream is deliberately left open so its lease is
 * still held when the second client appends.
 */
@Test public void testAppendAfterSoftLimit() throws IOException, InterruptedException {
  Configuration config = new HdfsConfiguration();
  config.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Tiny soft limit so it expires during the sleep below; huge hard limit
  // so hard-limit recovery never kicks in.
  final long softLimit = 1L;
  final long hardLimit = 9999999L;

  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  miniCluster.setLeasePeriod(softLimit, hardLimit);
  miniCluster.waitActive();

  FileSystem firstClient = miniCluster.getFileSystem();
  FileSystem secondClient = new DistributedFileSystem();
  secondClient.initialize(firstClient.getUri(), config);

  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] payload = AppendTestUtil.initBuffer(32);

  // First client writes but does not close; its lease stays active.
  FSDataOutputStream writer = firstClient.create(testPath);
  writer.write(payload);
  // Let the soft limit lapse.
  Thread.sleep(250);

  try {
    // Second client should now be able to take over and append.
    FSDataOutputStream appender = secondClient.append(testPath);
    appender.write(payload);
    appender.close();
    assertEquals(payload.length, firstClient.getFileStatus(testPath).getLen());
  } finally {
    firstClient.close();
    secondClient.close();
    miniCluster.shutdown();
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test two consecutive appends on a file with a full block: the second
 * append (by a different user) must fail with
 * AlreadyBeingCreatedException because the first appender already holds
 * the lease.
 */
@Test public void testAppendTwice() throws Exception {
  Configuration config = new HdfsConfiguration();
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).build();
  final FileSystem fs1 = miniCluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(config);
  try {
    final Path p = new Path("/testAppendTwice/foo");
    final int len = 1 << 16;
    final byte[] fileContents = AppendTestUtil.initBuffer(len);

    // Write exactly one full block and close the file.
    {
      FSDataOutputStream out = fs2.create(p, true, 4096, (short) 1, len);
      out.write(fileContents, 0, len);
      out.close();
    }

    // fs2 opens for append (taking the lease); fs1's append must fail.
    fs2.append(p);
    fs1.append(p);
    Assert.fail();
  } catch (RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    miniCluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestFileAppend3

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * TC11: Racing rename.
 * The file is renamed while an appending stream is still open with an
 * hflushed half-block; after closing, every block's on-disk length at
 * each replica must match the NameNode's view, and all full blocks must
 * be exactly BLOCK_SIZE.
 * @throws IOException an exception might be thrown
 */
@Test public void testTC11() throws Exception {
  final Path p = new Path("/TC11/foo");
  System.out.println("p=" + p);

  // Write one full block and close.
  final int len1 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }

  // Append half a block, hflush, rename the file while the stream is
  // still open, then close.
  FSDataOutputStream out = fs.append(p);
  final int len2 = (int) BLOCK_SIZE / 2;
  AppendTestUtil.write(out, len1, len2);
  out.hflush();
  final Path pnew = new Path(p + ".new");
  assertTrue(fs.rename(p, pnew));
  out.close();

  // Cross-check every block's size against each replica's stored block.
  final long len = fs.getFileStatus(pnew).getLen();
  final LocatedBlocks locatedblocks =
      fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
  final int numblock = locatedblocks.locatedBlockCount();
  for (int idx = 0; idx < numblock; idx++) {
    final LocatedBlock lb = locatedblocks.get(idx);
    final ExtendedBlock blk = lb.getBlock();
    final long size = lb.getBlockSize();
    if (idx < numblock - 1) {
      // All but the last block must be full.
      assertEquals(BLOCK_SIZE, size);
    }
    for (DatanodeInfo datanodeinfo : lb.getLocations()) {
      final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
      final Block metainfo = DataNodeTestUtils.getFSDataset(dn)
          .getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
      assertEquals(size, metainfo.getNumBytes());
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Append to a partial CRC chunk where the first write does not fill up
 * the partial CRC trunk: writes 1 byte, appends 1 byte (close), then
 * appends the remaining 511 bytes in several flushed pieces, and finally
 * verifies the whole 513-byte file.
 *
 * @throws IOException
 */
@Test public void testAppendToPartialChunk() throws IOException {
  final Path p = new Path("/partialChunk/foo");
  final int fileLen = 513;
  System.out.println("p=" + p);
  byte[] fileContents = AppendTestUtil.initBuffer(fileLen);

  // Create the file with a single byte.
  FSDataOutputStream out = AppendTestUtil.createFile(fs, p, 1);
  out.write(fileContents, 0, 1);
  out.close();
  System.out.println("Wrote 1 byte and closed the file " + p);

  // Reopen, append one more byte, flush, close.
  out = fs.append(p);
  out.write(fileContents, 1, 1);
  out.hflush();
  out.close();
  System.out.println("Append 1 byte and closed the file " + p);

  // Reopen at offset 2 and append the rest in flushed pieces.
  out = fs.append(p);
  assertEquals(2, out.getPos());
  out.write(fileContents, 2, 1);
  out.hflush();
  System.out.println("Append and flush 1 byte");
  out.write(fileContents, 3, 2);
  out.hflush();
  System.out.println("Append and flush 2 byte");
  out.write(fileContents, 5, fileLen - 5);
  out.close();
  System.out.println("Flush 508 byte and closed the file " + p);

  AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents,
      "Failed to append to a partial chunk");
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * TC7: Corrupted replicas are present.
 * One of two replicas is truncated to zero length on disk before an
 * append; the append and subsequent verification must still succeed
 * using the surviving good replica.
 * @throws IOException an exception might be thrown
 */
@Test public void testTC7() throws Exception {
  final short repl = 2;
  final Path p = new Path("/TC7/foo");
  System.out.println("p=" + p);

  // Write half a block at replication 2 and wait for both replicas.
  final int len1 = (int) (BLOCK_SIZE / 2);
  {
    FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }
  DFSTestUtil.waitReplication(fs, p, repl);

  // Locate the single block and its two replicas.
  final LocatedBlocks locatedblocks =
      fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
  assertEquals(1, locatedblocks.locatedBlockCount());
  final LocatedBlock lb = locatedblocks.get(0);
  final ExtendedBlock blk = lb.getBlock();
  assertEquals(len1, lb.getBlockSize());
  DatanodeInfo[] datanodeinfos = lb.getLocations();
  assertEquals(repl, datanodeinfos.length);

  // Corrupt the first replica by truncating its block file to zero.
  final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
  final File f = DataNodeTestUtils.getBlockFile(
      dn, blk.getBlockPoolId(), blk.getLocalBlock());
  final RandomAccessFile raf = new RandomAccessFile(f, "rw");
  AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk
      + " (length=" + blk.getNumBytes() + ")");
  assertEquals(len1, raf.length());
  raf.setLength(0);
  raf.close();

  // Append a full block and verify the whole file despite the corruption.
  final int len2 = (int) BLOCK_SIZE;
  {
    FSDataOutputStream out = fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }
  AppendTestUtil.check(fs, p, len1 + len2);
}

Class: org.apache.hadoop.hdfs.TestFileAppendRestart

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Regression test for HDFS-2991. Creates and appends to files * where blocks start/end on block boundaries. */ @Test public void testAppendRestart() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0); MiniDFSCluster cluster=null; FSDataOutputStream stream=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); FileSystem fs=cluster.getFileSystem(); File editLog=new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster,0).get(0),NNStorage.getInProgressEditsFileName(1)); EnumMap> counts; Path p1=new Path("/block-boundaries"); writeAndAppend(fs,p1,BLOCK_SIZE,BLOCK_SIZE); counts=FSImageTestUtil.countEditLogOpTypes(editLog); assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_ADD).held); assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held); assertEquals(2,(int)counts.get(FSEditLogOpCodes.OP_CLOSE).held); Path p2=new Path("/not-block-boundaries"); writeAndAppend(fs,p2,BLOCK_SIZE / 2,BLOCK_SIZE); counts=FSImageTestUtil.countEditLogOpTypes(editLog); assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_ADD).held); assertEquals(1,(int)counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held); assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held); assertEquals(2 + 2,(int)counts.get(FSEditLogOpCodes.OP_CLOSE).held); cluster.restartNameNode(); AppendTestUtil.check(fs,p1,2 * BLOCK_SIZE); AppendTestUtil.check(fs,p2,3 * BLOCK_SIZE / 2); } finally { IOUtils.closeStream(stream); if (cluster != null) { cluster.shutdown(); } } }

InternalCallVerifier EqualityVerifier 
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused append(),
 * when called exactly at a block boundary, to not log an OP_ADD. This
 * ensures that we can read from such buggy versions correctly, by loading
 * an image created using a namesystem image created with 0.23.1-rc2
 * exhibiting the issue.
 *
 * Fix: the MiniDFSCluster builder chain called {@code numDataNodes(0)}
 * twice; the duplicate call was removed.
 */
@Test public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
      + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  // Start from a clean extraction directory.
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  // Upgrade-start a NameNode-only cluster on the extracted buggy image.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2 * 1024 * 1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestFileCorruption

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test the case that a replica is reported corrupt while it is not in
 * blocksMap. Make sure that ArrayIndexOutOfBounds does not thrown.
 * See Hadoop-4351.
 *
 * Fixes: the raw {@code ArrayList} prevented assigning
 * {@code datanodes.get(2)} to a {@code DataNode} — restored
 * {@code ArrayList<DataNode>}; put assertEquals arguments in the
 * conventional (expected, actual) order.
 */
@Test public void testArrayOutOfBoundsException() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();

    FileSystem fs = cluster.getFileSystem();
    final Path FILE_PATH = new Path("/tmp.txt");
    final long FILE_LEN = 1L;
    DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short) 2, 1L);

    // Find the block on disk (it may live in either storage dir of DN 0).
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getInstanceStorageDir(0, 0);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    assertTrue("Data directory does not exist", dataDir.exists());
    ExtendedBlock blk = getBlock(bpid, dataDir);
    if (blk == null) {
      storageDir = cluster.getInstanceStorageDir(0, 1);
      dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      blk = getBlock(bpid, dataDir);
    }
    assertFalse("Data directory does not contain any blocks or there was an "
        + "IO error", blk == null);

    // Start a third DataNode that holds no replica of the block.
    cluster.startDataNodes(conf, 1, true, null, null);
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    DataNode dataNode = datanodes.get(2);
    DatanodeRegistration dnR =
        DataNodeTestUtils.getDNRegistrationForBP(dataNode, blk.getBlockPoolId());

    // Report the block corrupt from the DN that never had it; this must
    // not throw ArrayIndexOutOfBounds inside the BlockManager.
    FSNamesystem ns = cluster.getNamesystem();
    ns.writeLock();
    try {
      cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(
          blk, new DatanodeInfo(dnR), "TEST", "STORAGE_ID");
    } finally {
      ns.writeUnlock();
    }

    // The file must remain usable.
    fs.open(FILE_PATH);
    fs.delete(FILE_PATH, false);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestFileCreation

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test creating two files at the same time: interleaves writes to two
 * open streams (0..99 to both, then 100..199 to the second only) and
 * verifies each file reads back its own byte sequence.
 */
@Test public void testConcurrentFileCreation() throws IOException {
  Configuration config = new HdfsConfiguration();
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(config).build();
  try {
    FileSystem fileSys = miniCluster.getFileSystem();
    Path[] paths = { new Path("/foo"), new Path("/bar") };
    FSDataOutputStream[] writers = { fileSys.create(paths[0]), fileSys.create(paths[1]) };

    // Interleaved writes: both files get 0..99.
    int value = 0;
    while (value < 100) {
      writers[0].write(value);
      writers[1].write(value);
      value++;
    }
    writers[0].close();
    // The second file continues alone with 100..199.
    while (value < 200) {
      writers[1].write(value);
      value++;
    }
    writers[1].close();

    // Read back and verify both sequences.
    FSDataInputStream[] readers = { fileSys.open(paths[0]), fileSys.open(paths[1]) };
    for (int k = 0; k < 100; k++) {
      assertEquals(k, readers[0].read());
    }
    for (int k = 0; k < 200; k++) {
      assertEquals(k, readers[1].read());
    }
  } finally {
    if (miniCluster != null) {
      miniCluster.shutdown();
    }
  }
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Create a file, write something, hflush but not close. Then change lease
 * period and wait for lease recovery. Finally, read the block directly
 * from each Datanode and verify the content.
 */
@Test public void testLeaseExpireHardLimit() throws Exception {
  System.out.println("testLeaseExpireHardLimit start");
  final long leasePeriod = 1000;
  final int DATANODE_NUM = 3;

  Configuration config = new HdfsConfiguration();
  config.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  config.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster miniCluster =
      new MiniDFSCluster.Builder(config).numDataNodes(DATANODE_NUM).build();
  DistributedFileSystem dfs = null;
  try {
    miniCluster.waitActive();
    dfs = miniCluster.getFileSystem();

    // Write and hflush but never close — the lease stays with us.
    final String f = DIR + "foo";
    final Path fpath = new Path(f);
    HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
    out.write("something".getBytes());
    out.hflush();
    int actualRepl = out.getCurrentBlockReplication();
    assertTrue(f + " should be replicated to " + DATANODE_NUM
        + " datanodes.", actualRepl == DATANODE_NUM);

    // Shrink both lease limits and give recovery time to run.
    miniCluster.setLeasePeriod(leasePeriod, leasePeriod);
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    // After recovery the file should have exactly one finalized block.
    LocatedBlocks locations =
        dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
    assertEquals(1, locations.locatedBlockCount());
    LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);

    // Inspect the on-disk block file at every replica location.
    int successcount = 0;
    for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
      DataNode datanode = miniCluster.getDataNode(datanodeinfo.getIpcPort());
      ExtendedBlock blk = locatedblock.getBlock();
      Block b = DataNodeTestUtils.getFSDataset(datanode)
          .getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
      final File blockfile = DataNodeTestUtils.getFile(
          datanode, blk.getBlockPoolId(), b.getBlockId());
      System.out.println("blockfile=" + blockfile);
      if (blockfile != null) {
        BufferedReader in = new BufferedReader(new FileReader(blockfile));
        assertEquals("something", in.readLine());
        in.close();
        successcount++;
      }
    }
    System.out.println("successcount=" + successcount);
    assertTrue(successcount > 0);
  } finally {
    IOUtils.closeStream(dfs);
    miniCluster.shutdown();
  }
  System.out.println("testLeaseExpireHardLimit successful");
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/** * Test that file leases are persisted across namenode restarts.
 * Creates four files with open streams, renames two of them (one directly,
 * two via a parent-directory rename), restarts the NameNode twice with
 * idle-time sleeps in between, retargets the open DFSOutputStreams to the
 * renamed paths via setTestFilename, then writes/closes all streams and
 * verifies the NameNode reports the expected block counts for the
 * renamed and un-renamed files.
 * NOTE(review): the open streams are intentionally NOT closed before the
 * restarts — that is what exercises lease persistence. */ @Test public void testFileCreationNamenodeRestart() throws IOException { Configuration conf=new HdfsConfiguration(); final int MAX_IDLE_TIME=2000; conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME); conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1); if (simulatedStorage) { SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); DistributedFileSystem fs=null; try { cluster.waitActive(); fs=cluster.getFileSystem(); final int nnport=cluster.getNameNodePort(); Path file1=new Path("/filestatus.dat"); HdfsDataOutputStream stm=create(fs,file1,1); System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file1); assertEquals(file1 + " should be replicated to 1 datanode.",1,stm.getCurrentBlockReplication()); writeFile(stm,numBlocks * blockSize); stm.hflush(); assertEquals(file1 + " should still be replicated to 1 datanode.",1,stm.getCurrentBlockReplication()); Path fileRenamed=new Path("/filestatusRenamed.dat"); fs.rename(file1,fileRenamed); System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file1 + " to "+ fileRenamed); file1=fileRenamed; Path file2=new Path("/filestatus2.dat"); FSDataOutputStream stm2=createFile(fs,file2,1); System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file2); Path file3=new Path("/user/home/fullpath.dat"); FSDataOutputStream stm3=createFile(fs,file3,1); System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file3); Path file4=new Path("/user/home/fullpath4.dat"); FSDataOutputStream stm4=createFile(fs,file4,1); System.out.println("testFileCreationNamenodeRestart: " + "Created file " + file4); fs.mkdirs(new Path("/bin")); fs.rename(new Path("/user/home"),new Path("/bin")); Path file3new=new Path("/bin/home/fullpath.dat"); System.out.println("testFileCreationNamenodeRestart: " + 
"Renamed file " + file3 + " to "+ file3new); Path file4new=new Path("/bin/home/fullpath4.dat"); System.out.println("testFileCreationNamenodeRestart: " + "Renamed file " + file4 + " to "+ file4new); cluster.shutdown(); try { Thread.sleep(2 * MAX_IDLE_TIME); } catch ( InterruptedException e) { } cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build(); cluster.waitActive(); cluster.shutdown(); try { Thread.sleep(5000); } catch ( InterruptedException e) { } cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build(); cluster.waitActive(); fs=cluster.getFileSystem(); DFSOutputStream dfstream=(DFSOutputStream)(stm.getWrappedStream()); dfstream.setTestFilename(file1.toString()); dfstream=(DFSOutputStream)(stm3.getWrappedStream()); dfstream.setTestFilename(file3new.toString()); dfstream=(DFSOutputStream)(stm4.getWrappedStream()); dfstream.setTestFilename(file4new.toString()); byte[] buffer=AppendTestUtil.randomBytes(seed,1); stm.write(buffer); stm.close(); stm2.write(buffer); stm2.close(); stm3.close(); stm4.close(); DFSClient client=fs.dfs; LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE); System.out.println("locations = " + locations.locatedBlockCount()); assertTrue("Error blocks were not cleaned up for file " + file1,locations.locatedBlockCount() == 3); locations=client.getNamenode().getBlockLocations(file2.toString(),0,Long.MAX_VALUE); System.out.println("locations = " + locations.locatedBlockCount()); assertTrue("Error blocks were not cleaned up for file " + file2,locations.locatedBlockCount() == 1); } finally { IOUtils.closeStream(fs); cluster.shutdown(); } }

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/** * Test that the filesystem removes the last block from a file if its * lease expires. */ @Test public void testFileCreationError2() throws IOException { long leasePeriod=1000; System.out.println("testFileCreationError2 start"); Configuration conf=new HdfsConfiguration(); conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY,1); if (simulatedStorage) { SimulatedFSDataset.setFactory(conf); } MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); DistributedFileSystem dfs=null; try { cluster.waitActive(); dfs=cluster.getFileSystem(); DFSClient client=dfs.dfs; Path file1=new Path("/filestatus.dat"); createFile(dfs,file1,1); System.out.println("testFileCreationError2: " + "Created file filestatus.dat with one replicas."); LocatedBlocks locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE); System.out.println("testFileCreationError2: " + "The file has " + locations.locatedBlockCount() + " blocks."); LocatedBlock location=client.getNamenode().addBlock(file1.toString(),client.clientName,null,null,INodeId.GRANDFATHER_INODE_ID,null); System.out.println("testFileCreationError2: " + "Added block " + location.getBlock()); locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE); int count=locations.locatedBlockCount(); System.out.println("testFileCreationError2: " + "The file now has " + count + " blocks."); cluster.setLeasePeriod(leasePeriod,leasePeriod); try { Thread.sleep(5 * leasePeriod); } catch ( InterruptedException e) { } locations=client.getNamenode().getBlockLocations(file1.toString(),0,Long.MAX_VALUE); System.out.println("testFileCreationError2: " + "locations = " + locations.locatedBlockCount()); assertEquals(0,locations.locatedBlockCount()); System.out.println("testFileCreationError2 successful"); } finally { IOUtils.closeStream(dfs); cluster.shutdown(); } }

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/** * Same test but the client should bind to a local interface */ @Test public void testFileCreationSetLocalInterface() throws IOException { assumeTrue(System.getProperty("os.name").startsWith("Linux")); checkFileCreation("lo",false); try { checkFileCreation("bogus-interface",false); fail("Able to specify a bogus interface"); } catch ( UnknownHostException e) { assertEquals("No such interface bogus-interface",e.getMessage()); } }

IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * Test creating a file whose data gets sync when closed */ @Test public void testFileCreationSyncOnClose() throws IOException { Configuration conf=new HdfsConfiguration(); conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { FileSystem fs=cluster.getFileSystem(); Path[] p={new Path("/foo"),new Path("/bar")}; FSDataOutputStream[] out={fs.create(p[0]),fs.create(p[1])}; int i=0; for (; i < 100; i++) { out[0].write(i); out[1].write(i); } out[0].close(); for (; i < 200; i++) { out[1].write(i); } out[1].close(); FSDataInputStream[] in={fs.open(p[0]),fs.open(p[1])}; for (i=0; i < 100; i++) { assertEquals(i,in[0].read()); } for (i=0; i < 200; i++) { assertEquals(i,in[1].read()); } } finally { if (cluster != null) { cluster.shutdown(); } } }

InternalCallVerifier EqualityVerifier 
/** * Test that server default values can be retrieved on the client side */ @Test public void testServerDefaults() throws IOException { Configuration conf=new HdfsConfiguration(); conf.setLong(DFS_BLOCK_SIZE_KEY,DFS_BLOCK_SIZE_DEFAULT); conf.setInt(DFS_BYTES_PER_CHECKSUM_KEY,DFS_BYTES_PER_CHECKSUM_DEFAULT); conf.setInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT); conf.setInt(DFS_REPLICATION_KEY,DFS_REPLICATION_DEFAULT + 1); conf.setInt(IO_FILE_BUFFER_SIZE_KEY,IO_FILE_BUFFER_SIZE_DEFAULT); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(DFSConfigKeys.DFS_REPLICATION_DEFAULT + 1).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); try { FsServerDefaults serverDefaults=fs.getServerDefaults(); assertEquals(DFS_BLOCK_SIZE_DEFAULT,serverDefaults.getBlockSize()); assertEquals(DFS_BYTES_PER_CHECKSUM_DEFAULT,serverDefaults.getBytesPerChecksum()); assertEquals(DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT,serverDefaults.getWritePacketSize()); assertEquals(DFS_REPLICATION_DEFAULT + 1,serverDefaults.getReplication()); assertEquals(IO_FILE_BUFFER_SIZE_DEFAULT,serverDefaults.getFileBufferSize()); } finally { fs.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestFileCreationClient

IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * Test lease recovery Triggered by DFSClient. */ @Test public void testClientTriggeredLeaseRecovery() throws Exception { final int REPLICATION=3; Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,1); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,REPLICATION); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build(); try { final FileSystem fs=cluster.getFileSystem(); final Path dir=new Path("/wrwelkj"); SlowWriter[] slowwriters=new SlowWriter[10]; for (int i=0; i < slowwriters.length; i++) { slowwriters[i]=new SlowWriter(fs,new Path(dir,"file" + i)); } try { for (int i=0; i < slowwriters.length; i++) { slowwriters[i].start(); } Thread.sleep(1000); cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION)); System.out.println("Wait a few seconds"); Thread.sleep(5000); } finally { for (int i=0; i < slowwriters.length; i++) { if (slowwriters[i] != null) { slowwriters[i].running=false; slowwriters[i].interrupt(); } } for (int i=0; i < slowwriters.length; i++) { if (slowwriters[i] != null) { slowwriters[i].join(); } } } System.out.println("Verify the file"); for (int i=0; i < slowwriters.length; i++) { System.out.println(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen()); FSDataInputStream in=null; try { in=fs.open(slowwriters[i].filepath); for (int j=0, x; (x=in.read()) != -1; j++) { assertEquals(j,x); } } finally { IOUtils.closeStream(in); } } } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestFileLengthOnClusterRestart

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Tests the fileLength when we sync the file and restart the cluster and * Datanodes not report to Namenode yet. */ @Test(timeout=60000) public void testFileLengthWithHSyncAndClusterRestartWithOutDNsRegister() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); HdfsDataInputStream in=null; try { Path path=new Path("/tmp/TestFileLengthOnClusterRestart","test"); DistributedFileSystem dfs=cluster.getFileSystem(); FSDataOutputStream out=dfs.create(path); int fileLength=1030; out.write(new byte[fileLength]); out.hsync(); cluster.restartNameNode(); cluster.waitActive(); in=(HdfsDataInputStream)dfs.open(path,1024); Assert.assertEquals(fileLength,in.getVisibleLength()); cluster.shutdownDataNodes(); cluster.restartNameNode(false); verifyNNIsInSafeMode(dfs); try { in=(HdfsDataInputStream)dfs.open(path); Assert.fail("Expected IOException"); } catch ( IOException e) { Assert.assertTrue(e.getLocalizedMessage().indexOf("Name node is in safe mode") >= 0); } } finally { if (null != in) { in.close(); } cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestFileStatus

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test the FileStatus obtained calling listStatus on a file */ @Test public void testListStatusOnFile() throws IOException { FileStatus[] stats=fs.listStatus(file1); assertEquals(1,stats.length); FileStatus status=stats[0]; assertFalse(file1 + " should be a file",status.isDirectory()); assertEquals(blockSize,status.getBlockSize()); assertEquals(1,status.getReplication()); assertEquals(fileSize,status.getLen()); assertEquals(file1.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString()); RemoteIterator itor=fc.listStatus(file1); status=itor.next(); assertEquals(stats[0],status); assertFalse(file1 + " should be a file",status.isDirectory()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test the FileStatus obtained calling getFileStatus on a file */ @Test public void testGetFileStatusOnFile() throws Exception { checkFile(fs,file1,1); FileStatus status=fs.getFileStatus(file1); assertFalse(file1 + " should be a file",status.isDirectory()); assertEquals(blockSize,status.getBlockSize()); assertEquals(1,status.getReplication()); assertEquals(fileSize,status.getLen()); assertEquals(file1.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString()); }

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test getting a FileStatus object using a non-existant path */ @Test public void testGetFileStatusOnNonExistantFileDir() throws IOException { Path dir=new Path("/test/mkdirs"); try { fs.listStatus(dir); fail("listStatus of non-existent path should fail"); } catch ( FileNotFoundException fe) { assertEquals("File " + dir + " does not exist.",fe.getMessage()); } try { fc.listStatus(dir); fail("listStatus of non-existent path should fail"); } catch ( FileNotFoundException fe) { assertEquals("File " + dir + " does not exist.",fe.getMessage()); } try { fs.getFileStatus(dir); fail("getFileStatus of non-existent path should fail"); } catch ( FileNotFoundException fe) { assertTrue("Exception doesn't indicate non-existant path",fe.getMessage().startsWith("File does not exist")); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test calling getFileInfo directly on the client */ @Test public void testGetFileInfo() throws IOException { Path path=new Path("/"); assertTrue("/ should be a directory",fs.getFileStatus(path).isDirectory()); HdfsFileStatus fileInfo=dfsClient.getFileInfo("/noSuchFile"); assertEquals("Non-existant file should result in null",null,fileInfo); Path path1=new Path("/name1"); Path path2=new Path("/name1/name2"); assertTrue(fs.mkdirs(path1)); FSDataOutputStream out=fs.create(path2,false); out.close(); fileInfo=dfsClient.getFileInfo(path1.toString()); assertEquals(1,fileInfo.getChildrenNum()); fileInfo=dfsClient.getFileInfo(path2.toString()); assertEquals(0,fileInfo.getChildrenNum()); try { dfsClient.getFileInfo("non-absolute"); fail("getFileInfo for a non-absolute path did not throw IOException"); } catch ( RemoteException re) { assertTrue("Wrong exception for invalid file name",re.toString().contains("Invalid file name")); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): the listing-order assertions below (dir3 before file2, etc.)
// rely on HDFS returning directory entries in lexicographic name order —
// "dir3" < "dir4" < "dir5" < "filestatus2.dat" < "filestatus3.dat". Renaming
// any test file/dir can silently break the index-based assertions.
// Each file is blockSize/4 long written with blockSize/4 extra, so two files
// give the expected content summary of blockSize/2.
// NOTE(review): the raw RemoteIterator here compiles only because next() is
// compared via toString(); consider RemoteIterator<FileStatus> — TODO confirm
// against the FileContext#listStatus signature in this branch.
/** * Test FileStatus objects obtained from a directory */ @Test public void testGetFileStatusOnDir() throws Exception { Path dir=new Path("/test/mkdirs"); assertTrue("mkdir failed",fs.mkdirs(dir)); assertTrue("mkdir failed",fs.exists(dir)); FileStatus status=fs.getFileStatus(dir); assertTrue(dir + " should be a directory",status.isDirectory()); assertTrue(dir + " should be zero size ",status.getLen() == 0); assertEquals(dir.makeQualified(fs.getUri(),fs.getWorkingDirectory()).toString(),status.getPath().toString()); FileStatus[] stats=fs.listStatus(dir); assertEquals(dir + " should be empty",0,stats.length); assertEquals(dir + " should be zero size ",0,fs.getContentSummary(dir).getLength()); RemoteIterator itor=fc.listStatus(dir); assertFalse(dir + " should be empty",itor.hasNext()); Path file2=new Path(dir,"filestatus2.dat"); DFSTestUtil.createFile(fs,file2,blockSize / 4,blockSize / 4,blockSize,(short)1,seed); checkFile(fs,file2,1); status=fs.getFileStatus(file2); assertEquals(blockSize,status.getBlockSize()); assertEquals(1,status.getReplication()); file2=fs.makeQualified(file2); assertEquals(file2.toString(),status.getPath().toString()); Path file3=new Path(dir,"filestatus3.dat"); DFSTestUtil.createFile(fs,file3,blockSize / 4,blockSize / 4,blockSize,(short)1,seed); checkFile(fs,file3,1); file3=fs.makeQualified(file3); final int expected=blockSize / 2; assertEquals(dir + " size should be " + expected,expected,fs.getContentSummary(dir).getLength()); stats=fs.listStatus(dir); assertEquals(dir + " should have two entries",2,stats.length); assertEquals(file2.toString(),stats[0].getPath().toString()); assertEquals(file3.toString(),stats[1].getPath().toString()); itor=fc.listStatus(dir); assertEquals(file2.toString(),itor.next().getPath().toString()); assertEquals(file3.toString(),itor.next().getPath().toString()); assertFalse("Unexpected addtional file",itor.hasNext()); Path dir3=fs.makeQualified(new Path(dir,"dir3")); fs.mkdirs(dir3); dir3=fs.makeQualified(dir3); 
stats=fs.listStatus(dir); assertEquals(dir + " should have three entries",3,stats.length); assertEquals(dir3.toString(),stats[0].getPath().toString()); assertEquals(file2.toString(),stats[1].getPath().toString()); assertEquals(file3.toString(),stats[2].getPath().toString()); itor=fc.listStatus(dir); assertEquals(dir3.toString(),itor.next().getPath().toString()); assertEquals(file2.toString(),itor.next().getPath().toString()); assertEquals(file3.toString(),itor.next().getPath().toString()); assertFalse("Unexpected addtional file",itor.hasNext()); Path dir4=fs.makeQualified(new Path(dir,"dir4")); fs.mkdirs(dir4); dir4=fs.makeQualified(dir4); Path dir5=fs.makeQualified(new Path(dir,"dir5")); fs.mkdirs(dir5); dir5=fs.makeQualified(dir5); stats=fs.listStatus(dir); assertEquals(dir + " should have five entries",5,stats.length); assertEquals(dir3.toString(),stats[0].getPath().toString()); assertEquals(dir4.toString(),stats[1].getPath().toString()); assertEquals(dir5.toString(),stats[2].getPath().toString()); assertEquals(file2.toString(),stats[3].getPath().toString()); assertEquals(file3.toString(),stats[4].getPath().toString()); itor=fc.listStatus(dir); assertEquals(dir3.toString(),itor.next().getPath().toString()); assertEquals(dir4.toString(),itor.next().getPath().toString()); assertEquals(dir5.toString(),itor.next().getPath().toString()); assertEquals(file2.toString(),itor.next().getPath().toString()); assertEquals(file3.toString(),itor.next().getPath().toString()); assertFalse(itor.hasNext()); fs.delete(dir,true); }

Class: org.apache.hadoop.hdfs.TestGetBlocks

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * test getBlocks */ @Test public void testGetBlocks() throws Exception { final Configuration CONF=new HdfsConfiguration(); final short REPLICATION_FACTOR=(short)2; final int DEFAULT_BLOCK_SIZE=1024; CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE); MiniDFSCluster cluster=new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build(); try { cluster.waitActive(); long fileLen=2 * DEFAULT_BLOCK_SIZE; DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/tmp.txt"),fileLen,REPLICATION_FACTOR,0L); List locatedBlocks; DatanodeInfo[] dataNodes=null; boolean notWritten; do { final DFSClient dfsclient=new DFSClient(NameNode.getAddress(CONF),CONF); locatedBlocks=dfsclient.getNamenode().getBlockLocations("/tmp.txt",0,fileLen).getLocatedBlocks(); assertEquals(2,locatedBlocks.size()); notWritten=false; for (int i=0; i < 2; i++) { dataNodes=locatedBlocks.get(i).getLocations(); if (dataNodes.length != REPLICATION_FACTOR) { notWritten=true; try { Thread.sleep(10); } catch ( InterruptedException e) { } break; } } } while (notWritten); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); NamenodeProtocol namenode=NameNodeProxies.createProxy(CONF,NameNode.getUri(addr),NamenodeProtocol.class).getProxy(); BlockWithLocations[] locs; locs=namenode.getBlocks(dataNodes[0],fileLen).getBlocks(); assertEquals(locs.length,2); assertEquals(locs[0].getStorageIDs().length,2); assertEquals(locs[1].getStorageIDs().length,2); locs=namenode.getBlocks(dataNodes[0],DEFAULT_BLOCK_SIZE).getBlocks(); assertEquals(locs.length,1); assertEquals(locs[0].getStorageIDs().length,2); locs=namenode.getBlocks(dataNodes[0],1).getBlocks(); assertEquals(locs.length,1); assertEquals(locs[0].getStorageIDs().length,2); getBlocksWithException(namenode,dataNodes[0],0); getBlocksWithException(namenode,dataNodes[0],-1); DatanodeInfo info=DFSTestUtil.getDatanodeInfo("1.2.3.4"); getBlocksWithException(namenode,info,2); } finally { cluster.shutdown(); } }

APIUtilityVerifier IterativeVerifier EqualityVerifier 
@Test public void testBlockKey(){ Map map=new HashMap(); final Random RAN=new Random(); final long seed=RAN.nextLong(); System.out.println("seed=" + seed); RAN.setSeed(seed); long[] blkids=new long[10]; for (int i=0; i < blkids.length; i++) { blkids[i]=1000L + RAN.nextInt(100000); map.put(new Block(blkids[i],0,blkids[i]),blkids[i]); } System.out.println("map=" + map.toString().replace(",","\n ")); for (int i=0; i < blkids.length; i++) { Block b=new Block(blkids[i],0,GenerationStamp.GRANDFATHER_GENERATION_STAMP); Long v=map.get(b); System.out.println(b + " => " + v); assertEquals(blkids[i],v.longValue()); } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): staleness is simulated, not waited for — the datanode's
// heartbeats are stopped and its lastUpdate is rewound past staleInterval
// directly on the DatanodeDescriptor. The key assertions are positional:
// after marking nodes[0] stale, it must be sorted LAST
// (nodesAfterStale[2] == old nodes[0]) in the returned locations.
// The test runs the same scenario twice: once against the first block of a
// completed write, once against the last (under-construction) block.
// NOTE(review): several assertEquals calls here pass (actual, expected) in
// reversed order; harmless at runtime but failure messages will be
// misleading — consider normalizing in a follow-up.
/** * Test if the datanodes returned by{@link ClientProtocol#getBlockLocations(String,long,long)} is correct * when stale nodes checking is enabled. Also test during the scenario when 1) * stale nodes checking is enabled, 2) a writing is going on, 3) a datanode * becomes stale happen simultaneously * @throws Exception */ @Test public void testReadSelectNonStaleDatanode() throws Exception { HdfsConfiguration conf=new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,true); long staleInterval=30 * 1000 * 60; conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,staleInterval); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).racks(racks).build(); cluster.waitActive(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); List nodeInfoList=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeListForReport(DatanodeReportType.LIVE); assertEquals("Unexpected number of datanodes",numDatanodes,nodeInfoList.size()); FileSystem fileSys=cluster.getFileSystem(); FSDataOutputStream stm=null; try { final Path fileName=new Path("/file1"); stm=fileSys.create(fileName,true,fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,4096),(short)3,blockSize); stm.write(new byte[(blockSize * 3) / 2]); stm.hflush(); LocatedBlocks blocks=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize); DatanodeInfo[] nodes=blocks.get(0).getLocations(); assertEquals(nodes.length,3); DataNode staleNode=null; DatanodeDescriptor staleNodeInfo=null; staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName()); assertNotNull(staleNode); staleNodeInfo=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()); staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1); LocatedBlocks 
blocksAfterStale=client.getNamenode().getBlockLocations(fileName.toString(),0,blockSize); DatanodeInfo[] nodesAfterStale=blocksAfterStale.get(0).getLocations(); assertEquals(nodesAfterStale.length,3); assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName()); DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode,false); staleNodeInfo.setLastUpdate(Time.now()); LocatedBlock lastBlock=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock(); nodes=lastBlock.getLocations(); assertEquals(nodes.length,3); staleNode=this.stopDataNodeHeartbeat(cluster,nodes[0].getHostName()); assertNotNull(staleNode); cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanode(staleNode.getDatanodeId()).setLastUpdate(Time.now() - staleInterval - 1); LocatedBlock lastBlockAfterStale=client.getLocatedBlocks(fileName.toString(),0,Long.MAX_VALUE).getLastLocatedBlock(); nodesAfterStale=lastBlockAfterStale.getLocations(); assertEquals(nodesAfterStale.length,3); assertEquals(nodesAfterStale[2].getHostName(),nodes[0].getHostName()); } finally { if (stm != null) { stm.close(); } if (client != null) { client.close(); } cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestHFlush

InternalCallVerifier EqualityVerifier 
/** * Test hsync (with updating block length in NameNode) while no data is * actually written yet */ @Test public void hSyncUpdateLength_00() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); DistributedFileSystem fileSystem=cluster.getFileSystem(); try { Path path=new Path(fName); FSDataOutputStream stm=fileSystem.create(path,true,4096,(short)2,AppendTestUtil.BLOCK_SIZE); System.out.println("Created file " + path.toString()); ((DFSOutputStream)stm.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); long currentFileLength=fileSystem.getFileStatus(path).getLen(); assertEquals(0L,currentFileLength); stm.close(); } finally { fileSystem.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestHdfsAdmin

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test that we can set and clear quotas via {@link HdfsAdmin}. */ @Test public void testHdfsAdminSetQuota() throws Exception { HdfsAdmin dfsAdmin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf); FileSystem fs=null; try { fs=FileSystem.get(conf); assertTrue(fs.mkdirs(TEST_PATH)); assertEquals(-1,fs.getContentSummary(TEST_PATH).getQuota()); assertEquals(-1,fs.getContentSummary(TEST_PATH).getSpaceQuota()); dfsAdmin.setSpaceQuota(TEST_PATH,10); assertEquals(-1,fs.getContentSummary(TEST_PATH).getQuota()); assertEquals(10,fs.getContentSummary(TEST_PATH).getSpaceQuota()); dfsAdmin.setQuota(TEST_PATH,10); assertEquals(10,fs.getContentSummary(TEST_PATH).getQuota()); assertEquals(10,fs.getContentSummary(TEST_PATH).getSpaceQuota()); dfsAdmin.clearSpaceQuota(TEST_PATH); assertEquals(10,fs.getContentSummary(TEST_PATH).getQuota()); assertEquals(-1,fs.getContentSummary(TEST_PATH).getSpaceQuota()); dfsAdmin.clearQuota(TEST_PATH); assertEquals(-1,fs.getContentSummary(TEST_PATH).getQuota()); assertEquals(-1,fs.getContentSummary(TEST_PATH).getSpaceQuota()); } finally { if (fs != null) { fs.close(); } } }

Class: org.apache.hadoop.hdfs.TestLease

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings("unchecked") @Test public void testFactory() throws Exception { final String[] groups=new String[]{"supergroup"}; final UserGroupInformation[] ugi=new UserGroupInformation[3]; for (int i=0; i < ugi.length; i++) { ugi[i]=UserGroupInformation.createUserForTesting("user" + i,groups); } Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).getFileInfo(anyString()); Mockito.doReturn(new HdfsFileStatus(0,false,1,1024,0,0,new FsPermission((short)777),"owner","group",new byte[0],new byte[0],1010,0,null)).when(mcp).create(anyString(),(FsPermission)anyObject(),anyString(),(EnumSetWritable)anyObject(),anyBoolean(),anyShort(),anyLong(),(List)anyList()); final Configuration conf=new Configuration(); final DFSClient c1=createDFSClientAs(ugi[0],conf); FSDataOutputStream out1=createFsOut(c1,"/out1"); final DFSClient c2=createDFSClientAs(ugi[0],conf); FSDataOutputStream out2=createFsOut(c2,"/out2"); Assert.assertEquals(c1.getLeaseRenewer(),c2.getLeaseRenewer()); final DFSClient c3=createDFSClientAs(ugi[1],conf); FSDataOutputStream out3=createFsOut(c3,"/out3"); Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer()); final DFSClient c4=createDFSClientAs(ugi[1],conf); FSDataOutputStream out4=createFsOut(c4,"/out4"); Assert.assertEquals(c3.getLeaseRenewer(),c4.getLeaseRenewer()); final DFSClient c5=createDFSClientAs(ugi[2],conf); FSDataOutputStream out5=createFsOut(c5,"/out5"); Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer()); Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test that we can open up a file for write, move it to another location, * and then create a new file in the previous location, without causing any * lease conflicts. This is possible because we now use unique inode IDs * to identify files to the NameNode. */ @Test public void testLeaseAfterRenameAndRecreate() throws Exception { MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { final Path path1=new Path("/test-file"); final String contents1="contents1"; final Path path2=new Path("/test-file-new-location"); final String contents2="contents2"; FileSystem fs=cluster.getFileSystem(); FSDataOutputStream out1=fs.create(path1); out1.writeBytes(contents1); Assert.assertTrue(hasLease(cluster,path1)); Assert.assertEquals(1,leaseCount(cluster)); DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf()); fs2.rename(path1,path2); FSDataOutputStream out2=fs2.create(path1); out2.writeBytes(contents2); out2.close(); Assert.assertTrue(hasLease(cluster,path2)); out1.close(); DistributedFileSystem fs3=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf()); Assert.assertEquals(contents1,DFSTestUtil.readFile(fs3,path2)); Assert.assertEquals(contents2,DFSTestUtil.readFile(fs3,path1)); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): a chain of five rename scenarios against ONE open writer
// (out): file into dir, parent dir rename, parent rename back, and two
// Options.Rename.OVERWRITE directory renames. State is carried forward via
// pRenamed/pRenamedAgain between steps — the steps are NOT independent, so
// do not reorder them. After every step the invariants are: exactly one
// lease exists, it follows the file to its new path, and no lease remains
// on the old path. The writer is closed only at the very end.
// NOTE(review): the assertion message "no lease for " on the
// assertFalse(hasLease(pRenamed)) in the third scenario reads inverted
// ("has lease for" would be clearer) — message only, logic is correct.
@Test public void testLeaseAfterRename() throws Exception { MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); try { Path p=new Path("/test-file"); Path d=new Path("/test-d"); Path d2=new Path("/test-d-other"); FileSystem fs=cluster.getFileSystem(); FSDataOutputStream out=fs.create(p); out.writeBytes("something"); Assert.assertTrue(hasLease(cluster,p)); Assert.assertEquals(1,leaseCount(cluster)); DistributedFileSystem fs2=(DistributedFileSystem)FileSystem.newInstance(fs.getUri(),fs.getConf()); LOG.info("DMS: rename file into dir"); Path pRenamed=new Path(d,p.getName()); fs2.mkdirs(d); fs2.rename(p,pRenamed); Assert.assertFalse(p + " exists",fs2.exists(p)); Assert.assertTrue(pRenamed + " not found",fs2.exists(pRenamed)); Assert.assertFalse("has lease for " + p,hasLease(cluster,p)); Assert.assertTrue("no lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertEquals(1,leaseCount(cluster)); LOG.info("DMS: rename parent dir"); Path pRenamedAgain=new Path(d2,pRenamed.getName()); fs2.rename(d,d2); Assert.assertFalse(d + " exists",fs2.exists(d)); Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d2 + " not found",fs2.exists(d2)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); LOG.info("DMS: rename parent again"); pRenamed=pRenamedAgain; pRenamedAgain=new Path(new Path(d,d2.getName()),p.getName()); fs2.mkdirs(d); fs2.rename(d2,d); Assert.assertFalse(d2 + " exists",fs2.exists(d2)); Assert.assertFalse("no lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d + " not found",fs2.exists(d)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); pRenamed=pRenamedAgain; 
pRenamedAgain=new Path(d2,p.getName()); fs2.rename(pRenamed.getParent(),d2,Options.Rename.OVERWRITE); Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent())); Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d2 + " not found",fs2.exists(d2)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); pRenamed=pRenamedAgain; pRenamedAgain=new Path(d,p.getName()); fs2.rename(pRenamed.getParent(),d,Options.Rename.OVERWRITE); Assert.assertFalse(pRenamed.getParent() + " not found",fs2.exists(pRenamed.getParent())); Assert.assertFalse("has lease for " + pRenamed,hasLease(cluster,pRenamed)); Assert.assertTrue(d + " not found",fs2.exists(d)); Assert.assertTrue(pRenamedAgain + " not found",fs2.exists(pRenamedAgain)); Assert.assertTrue("no lease for " + pRenamedAgain,hasLease(cluster,pRenamedAgain)); Assert.assertEquals(1,leaseCount(cluster)); out.close(); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestLeaseRecovery

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// NOTE(review): two phases. Phase 1: create a replicated file, trigger an
// append to reopen the last block, wait for lease recovery
// (waitLeaseRecovery), then assert every datanode's stored replica of the
// last block converged to the same (block id, length, generation stamp) —
// i.e. block synchronization truncated/aligned all replicas.
// Phase 2: while in SAFEMODE_ENTER, lease recovery must NOT run, so exactly
// one lease must remain after waitLeaseRecovery; safe mode is then left.
// The datanode lookup keys off datanodeinfos[i].getIpcPort() — valid only
// because MiniDFSCluster gives each DN a distinct ipc port.
// NOTE(review): assertTrue(x == 1) / assertTrue(x != null) patterns here
// would read better as assertEquals/assertNotNull — message-quality only.
/** * The following test first creates a file with a few blocks. * It randomly truncates the replica of the last block stored in each datanode. * Finally, it triggers block synchronization to synchronize all stored block. */ @Test public void testBlockSynchronization() throws Exception { final int ORG_FILE_SIZE=3000; Configuration conf=new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(5).build(); cluster.waitActive(); DistributedFileSystem dfs=cluster.getFileSystem(); String filestr="/foo"; Path filepath=new Path(filestr); DFSTestUtil.createFile(dfs,filepath,ORG_FILE_SIZE,REPLICATION_NUM,0L); assertTrue(dfs.exists(filepath)); DFSTestUtil.waitReplication(dfs,filepath,REPLICATION_NUM); LocatedBlock locatedblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr); DatanodeInfo[] datanodeinfos=locatedblock.getLocations(); assertEquals(REPLICATION_NUM,datanodeinfos.length); DataNode[] datanodes=new DataNode[REPLICATION_NUM]; for (int i=0; i < REPLICATION_NUM; i++) { datanodes[i]=cluster.getDataNode(datanodeinfos[i].getIpcPort()); assertTrue(datanodes[i] != null); } ExtendedBlock lastblock=locatedblock.getBlock(); DataNode.LOG.info("newblocks=" + lastblock); for (int i=0; i < REPLICATION_NUM; i++) { checkMetaInfo(lastblock,datanodes[i]); } DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName); cluster.getNameNodeRpc().append(filestr,dfs.dfs.clientName); waitLeaseRecovery(cluster); Block[] updatedmetainfo=new Block[REPLICATION_NUM]; long oldSize=lastblock.getNumBytes(); lastblock=TestInterDatanodeProtocol.getLastLocatedBlock(dfs.dfs.getNamenode(),filestr).getBlock(); long currentGS=lastblock.getGenerationStamp(); for (int i=0; i < REPLICATION_NUM; i++) { updatedmetainfo[i]=DataNodeTestUtils.getFSDataset(datanodes[i]).getStoredBlock(lastblock.getBlockPoolId(),lastblock.getBlockId()); 
assertEquals(lastblock.getBlockId(),updatedmetainfo[i].getBlockId()); assertEquals(oldSize,updatedmetainfo[i].getNumBytes()); assertEquals(currentGS,updatedmetainfo[i].getGenerationStamp()); } System.out.println("Testing that lease recovery cannot happen during safemode."); filestr="/foo.safemode"; filepath=new Path(filestr); dfs.create(filepath,(short)1); cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER,false); assertTrue(dfs.dfs.exists(filestr)); DFSTestUtil.waitReplication(dfs,filepath,(short)1); waitLeaseRecovery(cluster); LeaseManager lm=NameNodeAdapter.getLeaseManager(cluster.getNamesystem()); assertTrue("Found " + lm.countLease() + " lease, expected 1",lm.countLease() == 1); cluster.getNameNodeRpc().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE,false); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestLeaseRecovery2

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/** * This test makes the client does not renew its lease and also * set the hard lease expiration period to be short 1s. Thus triggering * lease expiration to happen while the client is still alive. * The test makes sure that the lease recovery completes and the client * fails if it continues to write to the file. * @throws Exception */ @Test public void testHardLeaseRecovery() throws Exception { String filestr="/hardLeaseRecovery"; AppendTestUtil.LOG.info("filestr=" + filestr); Path filepath=new Path(filestr); FSDataOutputStream stm=dfs.create(filepath,true,BUF_SIZE,REPLICATION_NUM,BLOCK_SIZE); assertTrue(dfs.dfs.exists(filestr)); int size=AppendTestUtil.nextInt(FILE_SIZE); AppendTestUtil.LOG.info("size=" + size); stm.write(buffer,0,size); AppendTestUtil.LOG.info("hflush"); stm.hflush(); AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()"); dfs.dfs.getLeaseRenewer().interruptAndJoin(); cluster.setLeasePeriod(LONG_LEASE_PERIOD,SHORT_LEASE_PERIOD); LocatedBlocks locatedBlocks; do { Thread.sleep(SHORT_LEASE_PERIOD); locatedBlocks=dfs.dfs.getLocatedBlocks(filestr,0L,size); } while (locatedBlocks.isUnderConstruction()); assertEquals(size,locatedBlocks.getFileLength()); try { stm.write('b'); stm.close(); fail("Writer thread should have been killed"); } catch ( IOException e) { e.printStackTrace(); } AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes..."); AppendTestUtil.checkFullFile(dfs,filepath,size,buffer,filestr); }

Class: org.apache.hadoop.hdfs.TestLeaseRenewer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Checks the renewer daemon's name and that it stops once its last file is closed. */
@Test
public void testThreadName() throws Exception {
  DFSOutputStream stream=Mockito.mock(DFSOutputStream.class);
  long id=789L;
  Assert.assertFalse("Renewer not initially running",renewer.isRunning());

  // Registering an open file must start the renewer daemon, whose name
  // encodes the user and the filesystem URI.
  renewer.put(id,stream,MOCK_DFSCLIENT);
  Assert.assertTrue("Renewer should have started running",renewer.isRunning());
  String daemonName=renewer.getDaemonName();
  Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/",daemonName);

  // Closing the only tracked file lets the renewer wind down; poll for up
  // to five seconds for it to stop.
  renewer.closeFile(id,MOCK_DFSCLIENT);
  renewer.setEmptyTime(Time.now());
  long deadline=Time.now() + 5000;
  while (renewer.isRunning() && Time.now() < deadline) {
    Thread.sleep(50);
  }
  Assert.assertFalse(renewer.isRunning());
}

Class: org.apache.hadoop.hdfs.TestListFilesInFileContext

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test when input path is a file: listFiles must return exactly that file,
 * with the correct length, qualified path, and a single block location, for
 * both recursive and non-recursive listing.
 */
@Test
public void testFile() throws IOException {
  fc.mkdir(TEST_DIR,FsPermission.getDefault(),true);
  writeFile(fc,FILE1,FILE_LEN);
  // Recursive and non-recursive listing of a plain file must be identical,
  // so run the same assertions for both modes (recursive first, matching
  // the original assertion order).
  for (boolean recursive : new boolean[] { true, false }) {
    // Parameterized iterator: the raw RemoteIterator would return Object
    // from next() and not compile against LocatedFileStatus.
    RemoteIterator<LocatedFileStatus> itor=fc.util().listFiles(FILE1,recursive);
    LocatedFileStatus stat=itor.next();
    assertFalse(itor.hasNext());
    assertTrue(stat.isFile());
    assertEquals(FILE_LEN,stat.getLen());
    assertEquals(fc.makeQualified(FILE1),stat.getPath());
    assertEquals(1,stat.getBlockLocations().length);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test when input path is a directory: an empty directory yields no files; a
 * file inside a subdirectory is visible both recursively and directly; with
 * files at two levels, the recursive listing is asserted to visit DIR1's
 * contents (FILE2, FILE3) before TEST_DIR's own FILE1, while the
 * non-recursive listing sees only FILE1.
 */
@Test
public void testDirectory() throws IOException {
  fc.mkdir(DIR1,FsPermission.getDefault(),true);

  // An empty directory lists no files, recursively or not. The iterator is
  // parameterized: the raw RemoteIterator would not compile where
  // LocatedFileStatus members are used.
  RemoteIterator<LocatedFileStatus> itor=fc.util().listFiles(DIR1,true);
  assertFalse(itor.hasNext());
  itor=fc.util().listFiles(DIR1,false);
  assertFalse(itor.hasNext());

  // A directory containing one file yields exactly that file either way.
  writeFile(fc,FILE2,FILE_LEN);
  assertSingleFile(fc.util().listFiles(DIR1,true),FILE2);
  assertSingleFile(fc.util().listFiles(DIR1,false),FILE2);

  // Files both directly under TEST_DIR and inside DIR1: the recursive
  // listing is expected to produce FILE2, FILE3, then FILE1; the
  // non-recursive listing only FILE1.
  writeFile(fc,FILE1,FILE_LEN);
  writeFile(fc,FILE3,FILE_LEN);
  itor=fc.util().listFiles(TEST_DIR,true);
  LocatedFileStatus stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE2),stat.getPath());
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE3),stat.getPath());
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1),stat.getPath());
  assertFalse(itor.hasNext());
  itor=fc.util().listFiles(TEST_DIR,false);
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1),stat.getPath());
  assertFalse(itor.hasNext());
}

/**
 * Asserts the iterator yields exactly one file: {@code expected}, of
 * FILE_LEN bytes with one block location.
 */
private void assertSingleFile(RemoteIterator<LocatedFileStatus> itor,Path expected) throws IOException {
  LocatedFileStatus stat=itor.next();
  assertFalse(itor.hasNext());
  assertTrue(stat.isFile());
  assertEquals(FILE_LEN,stat.getLen());
  assertEquals(fc.makeQualified(expected),stat.getPath());
  assertEquals(1,stat.getBlockLocations().length);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test when the input path has symbolic links as its children: dir5 links to
 * DIR1 and file4 links to FILE1. A recursive listing of dir4 is asserted to
 * reach FILE2 and FILE3 through the directory link and FILE1 through the
 * file link; a non-recursive listing resolves only the file link.
 */
@Test
public void testSymbolicLinks() throws IOException {
  writeFile(fc,FILE1,FILE_LEN);
  writeFile(fc,FILE2,FILE_LEN);
  writeFile(fc,FILE3,FILE_LEN);
  Path dir4=new Path(TEST_DIR,"dir4");
  Path dir5=new Path(dir4,"dir5");
  Path file4=new Path(dir4,"file4");
  fc.createSymlink(DIR1,dir5,true);
  fc.createSymlink(FILE1,file4,true);

  // Recursive listing follows both links: FILE2 and FILE3 via dir5 -> DIR1,
  // then FILE1 via file4. Parameterized iterator: the raw RemoteIterator
  // would not compile where LocatedFileStatus members are used.
  RemoteIterator<LocatedFileStatus> itor=fc.util().listFiles(dir4,true);
  LocatedFileStatus stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE2),stat.getPath());
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE3),stat.getPath());
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1),stat.getPath());
  assertFalse(itor.hasNext());

  // Non-recursive listing resolves only the file link.
  itor=fc.util().listFiles(dir4,false);
  stat=itor.next();
  assertTrue(stat.isFile());
  assertEquals(fc.makeQualified(FILE1),stat.getPath());
  assertFalse(itor.hasNext());
}

Class: org.apache.hadoop.hdfs.TestLocalDFS

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Exercises get/set of the DFS working directory with absolute and relative paths. */
@Test(timeout=20000)
public void testWorkingDirectory() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  FileSystem fs=cluster.getFileSystem();
  try {
    Path initialDir=fs.getWorkingDirectory();
    assertTrue(initialDir.isAbsolute());

    // A relative path resolves against the current working directory.
    Path relFile=new Path("somewhat/random.txt");
    writeFile(fs,relFile);
    assertTrue(fs.exists(new Path(initialDir,relFile.toString())));
    fs.delete(relFile,true);

    // Setting an absolute working directory.
    Path absoluteDir=new Path("/somewhere");
    fs.setWorkingDirectory(absoluteDir);
    writeFile(fs,relFile);
    cleanupFile(fs,new Path(absoluteDir,relFile.toString()));

    // A relative working directory resolves against the previous one.
    Path relativeDir=new Path("else");
    fs.setWorkingDirectory(relativeDir);
    writeFile(fs,relFile);
    readFile(fs,relFile);
    cleanupFile(fs,new Path(new Path(absoluteDir,relativeDir.toString()),relFile.toString()));

    // The home directory is /user/<username>.
    Path expectedHome=fs.makeQualified(new Path("/user/" + getUserName(fs)));
    Path actualHome=fs.getHomeDirectory();
    assertEquals(expectedHome,actualHome);
  }
  finally {
    fs.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestMiniDFSCluster

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Bring up two clusters and assert that they are in different directories.
 * @throws Throwable on a failure
 */
@Test(timeout=100000)
public void testDualClusters() throws Throwable {
  File testDataCluster2=new File(testDataPath,CLUSTER_2);
  File testDataCluster3=new File(testDataPath,CLUSTER_3);
  Configuration conf=new HdfsConfiguration();
  String c2Path=testDataCluster2.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,c2Path);
  MiniDFSCluster cluster2=new MiniDFSCluster.Builder(conf).build();
  MiniDFSCluster cluster3=null;
  try {
    String dataDir2=cluster2.getDataDirectory();
    // Build the expected path with the (parent, child) File constructor
    // instead of concatenating "/data", so the comparison is
    // separator-portable.
    assertEquals(new File(c2Path,"data"),new File(dataDir2));
    // Reusing the same conf with a different base dir must give the second
    // cluster its own storage directory.
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,testDataCluster3.getAbsolutePath());
    MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf);
    cluster3=builder.build();
    String dataDir3=cluster3.getDataDirectory();
    assertTrue("Clusters are bound to the same directory: " + dataDir2,!dataDir2.equals(dataDir3));
  }
  finally {
    MiniDFSCluster.shutdownCluster(cluster3);
    MiniDFSCluster.shutdownCluster(cluster2);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verify that without system properties the cluster still comes up, provided
 * the configuration is set.
 * @throws Throwable on a failure
 */
@Test(timeout=100000)
public void testClusterWithoutSystemProperties() throws Throwable {
  System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
  Configuration conf=new HdfsConfiguration();
  File testDataCluster1=new File(testDataPath,CLUSTER_1);
  String c1Path=testDataCluster1.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,c1Path);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build();
  try {
    // Build the expected path with the (parent, child) File constructor
    // rather than concatenating "/data", so the comparison is
    // separator-portable.
    assertEquals(new File(c1Path,"data"),new File(cluster.getDataDirectory()));
  }
  finally {
    cluster.shutdown();
  }
}

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/** MiniDFSCluster should not clobber dfs.datanode.hostname if requested. */
@Test(timeout=100000)
public void testClusterSetDatanodeHostname() throws Throwable {
  // Skip on non-Linux platforms.
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  Configuration conf=new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY,"MYHOST");
  File clusterBaseDir=new File(testDataPath,CLUSTER_5);
  String basePath=clusterBaseDir.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,basePath);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).checkDataNodeHostConfig(true).build();
  try {
    // The datanode must report the configured hostname, not a resolved one.
    assertEquals("DataNode hostname config not respected","MYHOST",cluster.getDataNodes().get(0).getDatanodeId().getHostName());
  }
  finally {
    MiniDFSCluster.shutdownCluster(cluster);
  }
}

Class: org.apache.hadoop.hdfs.TestMissingBlocksAlert

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Corrupts the only good replica path for a block, waits for the namenode to
 * count it as missing, and verifies the missing/under-replicated counters —
 * both via the DistributedFileSystem API and the NameNodeInfo MXBean —
 * before and after the corrupt file is deleted.
 */
@Test
public void testMissingBlocksAlert() throws IOException, InterruptedException, MalformedObjectNameException, AttributeNotFoundException, MBeanException, ReflectionException, InstanceNotFoundException {
  MiniDFSCluster cluster=null;
  try {
    Configuration conf=new HdfsConfiguration();
    // Minimize replication/retry intervals so the test reacts quickly.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,0);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE,10);
    int fileLen=10 * 1024;
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,fileLen / 2);
    cluster=new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    final BlockManager bm=cluster.getNamesystem().getBlockManager();
    DistributedFileSystem dfs=cluster.getFileSystem();
    DFSTestUtil.createFile(dfs,new Path("/testMissingBlocksAlert/file1"),fileLen,(short)3,0);
    Path corruptFile=new Path("/testMissingBlocks/corruptFile");
    DFSTestUtil.createFile(dfs,corruptFile,fileLen,(short)3,0);
    // Corrupt replica 0 of the first block, then read the file so the
    // client detects and reports the corruption.
    ExtendedBlock block=DFSTestUtil.getFirstBlock(dfs,corruptFile);
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block,0));
    FSDataInputStream in=dfs.open(corruptFile);
    try {
      in.readFully(new byte[fileLen]);
    }
    catch ( ChecksumException ignored) {
      // Expected: reading the corrupted replica fails its checksum.
    }
    in.close();
    LOG.info("Waiting for missing blocks count to increase...");
    // NOTE(review): unbounded poll; relies on the test-harness timeout if
    // the count never rises.
    while (dfs.getMissingBlocksCount() <= 0) {
      Thread.sleep(100);
    }
    // assertEquals instead of assertTrue(x == 1) for a useful failure message.
    assertEquals(1,dfs.getMissingBlocksCount());
    assertEquals(4,dfs.getUnderReplicatedBlocksCount());
    assertEquals(3,bm.getUnderReplicatedNotMissingBlocks());
    // The same count must be visible through the NameNodeInfo MXBean.
    MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    Assert.assertEquals(1,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks"));
    // Deleting the corrupt file clears the alert.
    dfs.delete(corruptFile,true);
    LOG.info("Waiting for missing blocks count to be zero...");
    while (dfs.getMissingBlocksCount() > 0) {
      Thread.sleep(100);
    }
    assertEquals(2,dfs.getUnderReplicatedBlocksCount());
    assertEquals(2,bm.getUnderReplicatedNotMissingBlocks());
    Assert.assertEquals(0,(long)(Long)mbs.getAttribute(mxbeanName,"NumberOfMissingBlocks"));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestModTime

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Regression test for HDFS-3864 - NN does not update internal file mtime for
 * OP_CLOSE when reading from the edit log.
 */
@Test
public void testModTimePersistsAfterRestart() throws IOException {
  final long sleepMs=10;
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  Configuration conf=new HdfsConfiguration();
  try {
    cluster=new MiniDFSCluster.Builder(conf).build();
    fs=cluster.getFileSystem();
    Path path=new Path("/test");
    OutputStream out=fs.create(path);
    long mtimeAtCreate=fs.getFileStatus(path).getModificationTime();
    assertTrue(mtimeAtCreate > 0);
    // Let some wall-clock time pass so close() lands on a strictly later
    // mtime than create().
    ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepMs);
    out.close();
    long mtimeAtClose=fs.getFileStatus(path).getModificationTime();
    assertTrue(mtimeAtClose >= mtimeAtCreate + sleepMs);
    // The close-time mtime must survive a namenode restart.
    cluster.restartNameNode();
    long mtimeAfterRestart=fs.getFileStatus(path).getModificationTime();
    assertEquals(mtimeAtClose,mtimeAfterRestart);
  }
  finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests modification time in DFS.
 * Verifies that: a new file has a non-zero mtime; creating a file advances
 * its parent directory's mtime; rename preserves the file's own mtime while
 * changing both the source and destination directories' mtimes; delete
 * changes only the containing directory's mtime.
 */
@Test
public void testModTime() throws IOException {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort());
  DFSClient client=new DFSClient(addr,conf);
  DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ",numDatanodes,info.length);
  FileSystem fileSys=cluster.getFileSystem();
  int replicas=numDatanodes - 1;
  assertTrue(fileSys instanceof DistributedFileSystem);
  try {
    // Create testdir1/test1.dat; a fresh file must carry a non-zero mtime.
    System.out.println("Creating testdir1 and testdir1/test1.dat.");
    Path dir1=new Path("testdir1");
    Path file1=new Path(dir1,"test1.dat");
    DFSTestUtil.createFile(fileSys,file1,fileSize,fileSize,blockSize,(short)replicas,seed);
    FileStatus stat=fileSys.getFileStatus(file1);
    long mtime1=stat.getModificationTime();
    assertTrue(mtime1 != 0);
    stat=fileSys.getFileStatus(dir1);
    long mdir1=stat.getModificationTime();
    // Creating a second file must not rewind the parent directory's mtime.
    System.out.println("Creating testdir1/test2.dat.");
    Path file2=new Path(dir1,"test2.dat");
    DFSTestUtil.createFile(fileSys,file2,fileSize,fileSize,blockSize,(short)replicas,seed);
    stat=fileSys.getFileStatus(file2);
    stat=fileSys.getFileStatus(dir1);
    assertTrue(stat.getModificationTime() >= mdir1);
    mdir1=stat.getModificationTime();
    // Create a second directory to rename into and record its mtime.
    Path dir2=fileSys.makeQualified(new Path("testdir2/"));
    System.out.println("Creating testdir2 " + dir2);
    assertTrue(fileSys.mkdirs(dir2));
    stat=fileSys.getFileStatus(dir2);
    long mdir2=stat.getModificationTime();
    // Rename: the file keeps its own mtime, but both the source and the
    // destination directories get new mtimes.
    Path newfile=new Path(dir2,"testnew.dat");
    System.out.println("Moving " + file1 + " to "+ newfile);
    fileSys.rename(file1,newfile);
    stat=fileSys.getFileStatus(newfile);
    assertTrue(stat.getModificationTime() == mtime1);
    stat=fileSys.getFileStatus(dir1);
    assertTrue(stat.getModificationTime() != mdir1);
    mdir1=stat.getModificationTime();
    stat=fileSys.getFileStatus(dir2);
    assertTrue(stat.getModificationTime() != mdir2);
    mdir2=stat.getModificationTime();
    // Delete: only the directory that contained the file changes.
    System.out.println("Deleting testdir2/testnew.dat.");
    assertTrue(fileSys.delete(newfile,true));
    stat=fileSys.getFileStatus(dir1);
    assertTrue(stat.getModificationTime() == mdir1);
    stat=fileSys.getFileStatus(dir2);
    assertTrue(stat.getModificationTime() != mdir2);
    mdir2=stat.getModificationTime();
    cleanupFile(fileSys,file2);
    cleanupFile(fileSys,dir1);
    cleanupFile(fileSys,dir2);
  }
  catch ( IOException e) {
    // Dump a datanode report to aid debugging, then rethrow.
    info=client.datanodeReport(DatanodeReportType.ALL);
    printDatanodeReport(info);
    throw e;
  }
  finally {
    fileSys.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestPeerCache

IterativeVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Fills the cache to CAPACITY and inserts one more peer: the eldest entry
 * (index 0) must be evicted while every later entry remains retrievable,
 * open, and handed back exactly once.
 */
@Test
public void testEviction() throws Exception {
  final int CAPACITY=3;
  PeerCache cache=new PeerCache(CAPACITY,100000);
  // Java-style array declarations (the originals were C-style "dnIds[]").
  DatanodeID[] dnIds=new DatanodeID[CAPACITY + 1];
  FakePeer[] peers=new FakePeer[CAPACITY + 1];
  for (int i=0; i < dnIds.length; ++i) {
    dnIds[i]=new DatanodeID("192.168.0.1","fakehostname_" + i,"fake_datanode_id_" + i,100,101,102,103);
    peers[i]=new FakePeer(dnIds[i],false);
  }
  for (int i=0; i < CAPACITY; ++i) {
    cache.put(dnIds[i],peers[i]);
  }
  assertEquals(CAPACITY,cache.size());
  // Adding one more entry over capacity evicts the oldest (dnIds[0]).
  cache.put(dnIds[CAPACITY],peers[CAPACITY]);
  assertEquals(CAPACITY,cache.size());
  assertSame(null,cache.get(dnIds[0],false));
  // Every other peer is still cached and open; get() removes it.
  for (int i=1; i < CAPACITY; ++i) {
    Peer peer=cache.get(dnIds[i],false);
    assertSame(peers[i],peer);
    assertTrue(!peer.isClosed());
    peer.close();
  }
  assertEquals(1,cache.size());
  cache.close();
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With several peers cached under one DatanodeID, a domain-socket lookup
 * must return the single peer that has a domain socket, a second such lookup
 * must miss, and plain lookups must drain the remaining peers.
 */
@Test
public void testDomainSocketPeers() throws Exception {
  final int CAPACITY=3;
  PeerCache cache=new PeerCache(CAPACITY,100000);
  DatanodeID dnId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103);
  // Parameterized multiset (the raw type forced unchecked operations).
  HashMultiset<FakePeer> peers=HashMultiset.create(CAPACITY);
  for (int i=0; i < CAPACITY; ++i) {
    // Only the last peer gets a domain socket.
    FakePeer peer=new FakePeer(dnId,i == CAPACITY - 1);
    peers.add(peer);
    cache.put(dnId,peer);
  }
  assertEquals(CAPACITY,cache.size());
  // Exactly one cached peer has a domain socket; fetch it...
  Peer peer=cache.get(dnId,true);
  assertTrue(peer.getDomainSocket() != null);
  peers.remove(peer);
  // ...and a second domain-socket request must now miss.
  peer=cache.get(dnId,true);
  assertTrue(peer == null);
  // Plain lookups return every remaining peer, still open.
  while (!peers.isEmpty()) {
    peer=cache.get(dnId,false);
    assertTrue(peer != null);
    assertTrue(!peer.isClosed());
    peers.remove(peer);
  }
  assertEquals(0,cache.size());
  cache.close();
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A peer put into the cache can be taken back out exactly once. */
@Test
public void testAddAndRetrieve() throws Exception {
  PeerCache peerCache=new PeerCache(3,100000);
  DatanodeID datanodeId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103);
  FakePeer fakePeer=new FakePeer(datanodeId,false);
  peerCache.put(datanodeId,fakePeer);
  // Caching must not close the peer.
  assertTrue(!fakePeer.isClosed());
  assertEquals(1,peerCache.size());
  // Retrieval returns the same peer and removes it from the cache.
  assertEquals(fakePeer,peerCache.get(datanodeId,false));
  assertEquals(0,peerCache.size());
  peerCache.close();
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that cached peers expire: after sleeping well past EXPIRY_PERIOD
 * the cache must be empty and every expired peer must have been closed.
 */
@Test
public void testExpiry() throws Exception {
  final int CAPACITY=3;
  final int EXPIRY_PERIOD=10;
  PeerCache cache=new PeerCache(CAPACITY,EXPIRY_PERIOD);
  DatanodeID dnIds[]=new DatanodeID[CAPACITY];
  FakePeer peers[]=new FakePeer[CAPACITY];
  for (int i=0; i < CAPACITY; ++i) {
    dnIds[i]=new DatanodeID("192.168.0.1","fakehostname_" + i,"fake_datanode_id",100,101,102,103);
    peers[i]=new FakePeer(dnIds[i],false);
  }
  for (int i=0; i < CAPACITY; ++i) {
    cache.put(dnIds[i],peers[i]);
  }
  // Sleep 50x the expiry period so expiry is sure to have happened;
  // timing-based, so the generous margin keeps this stable.
  Thread.sleep(EXPIRY_PERIOD * 50);
  assertEquals(0,cache.size());
  // Expiry must also close each evicted peer.
  for (int i=0; i < CAPACITY; ++i) {
    assertTrue(peers[i].isClosed());
  }
  // Second sleep before close() — presumably to let the cache's background
  // expiry run against an already-empty cache; TODO confirm intent.
  Thread.sleep(EXPIRY_PERIOD * 50);
  cache.close();
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Several distinct peers cached under the same DatanodeID must all be stored
 * and retrievable — the cache keeps multiple values per key.
 */
@Test
public void testMultiplePeersWithSameKey() throws Exception {
  final int CAPACITY=3;
  PeerCache cache=new PeerCache(CAPACITY,100000);
  DatanodeID dnId=new DatanodeID("192.168.0.1","fakehostname","fake_datanode_id",100,101,102,103);
  // Parameterized multiset (the raw type forced unchecked operations).
  HashMultiset<FakePeer> peers=HashMultiset.create(CAPACITY);
  for (int i=0; i < CAPACITY; ++i) {
    FakePeer peer=new FakePeer(dnId,false);
    peers.add(peer);
    cache.put(dnId,peer);
  }
  assertEquals(CAPACITY,cache.size());
  // Each get() must hand back one of the cached peers, unclosed, until the
  // cache is drained.
  while (!peers.isEmpty()) {
    Peer peer=cache.get(dnId,false);
    assertTrue(peer != null);
    assertTrue(!peer.isClosed());
    peers.remove(peer);
  }
  assertEquals(0,cache.size());
  cache.close();
}

Class: org.apache.hadoop.hdfs.TestPersistBlocks

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Writes a multi-block file, abandons its last block while the file is still
 * open, restarts the namenode, and verifies the file comes back one block
 * shorter with the remaining bytes intact — i.e. the abandon survived the
 * restart.
 */
@Test
public void testRestartDfsWithAbandonedBlock() throws Exception {
  final Configuration conf=new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
  MiniDFSCluster cluster=null;
  long len=0;
  FSDataOutputStream stream;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs=cluster.getFileSystem();
    stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    stream.hflush();
    // Poll until the namenode reports all but the last block's worth of data.
    while (len < BLOCK_SIZE * (NUM_BLOCKS - 1)) {
      FileStatus status=fs.getFileStatus(FILE_PATH);
      len=status.getLen();
      Thread.sleep(100);
    }
    // Abandon the last located block while the file is still open.
    DFSClient dfsclient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
    HdfsFileStatus fileStatus=dfsclient.getNamenode().getFileInfo(FILE_NAME);
    LocatedBlocks blocks=dfsclient.getNamenode().getBlockLocations(FILE_NAME,0,BLOCK_SIZE * NUM_BLOCKS);
    assertEquals(NUM_BLOCKS,blocks.getLocatedBlocks().size());
    LocatedBlock b=blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(),fileStatus.getFileId(),FILE_NAME,dfsclient.clientName);
    // After a restart the file must be exactly one block shorter.
    cluster.restartNameNode();
    FileStatus status=fs.getFileStatus(FILE_PATH);
    assertTrue("Length incorrect: " + status.getLen(),status.getLen() == len - BLOCK_SIZE);
    // Verify the surviving bytes match what was originally written.
    FSDataInputStream readStream=fs.open(FILE_PATH);
    try {
      byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
      byte[] expectedBuf=new byte[DATA_BEFORE_RESTART.length - BLOCK_SIZE];
      System.arraycopy(DATA_BEFORE_RESTART,0,expectedBuf,0,expectedBuf.length);
      assertArrayEquals(expectedBuf,verifyBuf);
    }
    finally {
      IOUtils.closeStream(readStream);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Restarts the namenode while a partial block is hflushed and the file is
 * still open, then finishes the write and verifies that all bytes written on
 * both sides of the restart are readable.
 */
@Test
public void testRestartWithPartialBlockHflushed() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  // Drop idle IPC connections immediately so the restart is exercised
  // without cached connections.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
  MiniDFSCluster cluster=null;
  FSDataOutputStream stream;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs=cluster.getFileSystem();
    // (Removed a dead "NameNode.getAddress(conf).getPort();" statement
    // whose result was discarded.)
    stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART);
    stream.write((byte)1);
    stream.hflush();
    // Restart the namenode mid-write, then finish the file afterwards.
    cluster.restartNameNode();
    stream.write((byte)2);
    stream.hflush();
    stream.close();
    assertEquals(DATA_BEFORE_RESTART.length + 2,fs.getFileStatus(FILE_PATH).getLen());
    // Read everything back and compare with what was written on both sides
    // of the restart.
    FSDataInputStream readStream=fs.open(FILE_PATH);
    try {
      byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length + 2];
      IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
      byte[] expectedBuf=new byte[DATA_BEFORE_RESTART.length + 2];
      System.arraycopy(DATA_BEFORE_RESTART,0,expectedBuf,0,DATA_BEFORE_RESTART.length);
      System.arraycopy(new byte[]{1,2},0,expectedBuf,DATA_BEFORE_RESTART.length,2);
      assertArrayEquals(expectedBuf,verifyBuf);
    }
    finally {
      IOUtils.closeStream(readStream);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Writes half a file on create and half via append, then restarts the
 * namenode and verifies the full length and contents survived.
 */
@Test
public void testRestartWithAppend() throws IOException {
  final Configuration conf=new HdfsConfiguration();
  // Drop idle IPC connections immediately so the restart is exercised
  // without cached connections.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
  MiniDFSCluster cluster=null;
  FSDataOutputStream stream;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FileSystem fs=cluster.getFileSystem();
    // (Removed a dead "NameNode.getAddress(conf).getPort();" statement
    // whose result was discarded.)
    // First half on create, second half via append.
    stream=fs.create(FILE_PATH,true,BLOCK_SIZE,(short)1,BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART,0,DATA_BEFORE_RESTART.length / 2);
    stream.close();
    stream=fs.append(FILE_PATH,BLOCK_SIZE);
    stream.write(DATA_BEFORE_RESTART,DATA_BEFORE_RESTART.length / 2,DATA_BEFORE_RESTART.length / 2);
    stream.close();
    assertEquals(DATA_BEFORE_RESTART.length,fs.getFileStatus(FILE_PATH).getLen());
    // The appended length must survive a namenode restart.
    cluster.restartNameNode();
    assertEquals(DATA_BEFORE_RESTART.length,fs.getFileStatus(FILE_PATH).getLen());
    FSDataInputStream readStream=fs.open(FILE_PATH);
    try {
      byte[] verifyBuf=new byte[DATA_BEFORE_RESTART.length];
      IOUtils.readFully(readStream,verifyBuf,0,verifyBuf.length);
      assertArrayEquals(DATA_BEFORE_RESTART,verifyBuf);
    }
    finally {
      IOUtils.closeStream(readStream);
    }
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestPipelines

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Creates and closes a file of certain length, then reopens it with append()
 * and writes a little more. After hflush() the data has sunk through the
 * pipeline, so the last block's replica on every datanode must be in RBW
 * (replica-being-written) state.
 * @throws IOException in case of an error
 */
@Test
public void pipeline_01() throws IOException {
  final String METHOD_NAME=GenericTestUtils.getMethodName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath=new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs,filePath,FILE_SIZE,REPL_FACTOR,rand.nextLong());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  FSDataOutputStream ofs=fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream)ofs.getWrappedStream()).hflush();
  // Parameterized list: a raw List would return Object from get() and not
  // compile against LocatedBlock.
  List<LocatedBlock> lb=cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),FILE_SIZE - 1,FILE_SIZE).getLocatedBlocks();
  String bpid=cluster.getNamesystem().getBlockPoolId();
  // Every datanode must hold a replica of the appended block, in RBW state.
  for ( DataNode dn : cluster.getDataNodes()) {
    Replica r=DataNodeTestUtils.fetchReplicaInfo(dn,bpid,lb.get(0).getBlock().getBlockId());
    assertTrue("Replica on DN " + dn + " shouldn't be null",r != null);
    assertEquals("Should be RBW replica on " + dn + " after sequence of calls append()/write()/hflush()",HdfsServerConstants.ReplicaState.RBW,r.getState());
  }
  ofs.close();
}

Class: org.apache.hadoop.hdfs.TestPread

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Regression test: when a hedged read hits a ChecksumException on one
 * datanode and a delayed response on the other, the hedged-read loop must
 * run a bounded number of iterations (exactly 3 here) instead of retrying
 * indefinitely.
 *
 * The fault injector sleeps past the hedged-read threshold and throws a
 * single ChecksumException (guarded by compareAndSet so it fires once);
 * the read-delay injector slows every datanode read so hedging always
 * triggers.
 */
@Test
public void testHedgedReadLoopTooManyTimes() throws IOException {
  Configuration conf = new Configuration();
  int numHedgedReadPoolThreads = 5;
  final int hedgedReadTimeoutMillis = 50;
  conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      numHedgedReadPoolThreads);
  conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      hedgedReadTimeoutMillis);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  // Install a mocked fault injector so we can force failures on the read
  // path. NOTE(review): the static instance is reset via Mockito.reset()
  // below but is still left pointing at the mock after this test.
  DFSClientFaultInjector.instance = Mockito.mock(DFSClientFaultInjector.class);
  DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
  final int sleepMs = 100;
  // Fetch path: sleep past the hedged-read threshold, then throw exactly
  // one ChecksumException. (The `if (true)` guards are retained from the
  // original code; removing them would not change behavior.)
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (true) {
        Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
        if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
          System.out.println("-------------- throw Checksum Exception");
          throw new ChecksumException("ChecksumException test", 100);
        }
      }
      return null;
    }
  }).when(injector).fetchFromDatanodeException();
  // Every datanode read is delayed so the hedged path is always exercised.
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      if (true) {
        Thread.sleep(sleepMs * 2);
      }
      return null;
    }
  }).when(injector).readFromDatanodeDelay();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).build();
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSClient dfsClient = fileSys.getClient();
  FSDataOutputStream output = null;
  DFSInputStream input = null;
  String filename = "/hedgedReadMaxOut.dat";
  try {
    // Write three 64KB chunks, flushing between each.
    Path file = new Path(filename);
    output = fileSys.create(file, (short) 2);
    byte[] data = new byte[64 * 1024];
    output.write(data);
    output.flush();
    output.write(data);
    output.flush();
    output.write(data);
    output.flush();
    output.close();
    byte[] buffer = new byte[64 * 1024];
    input = dfsClient.open(filename);
    input.read(0, buffer, 0, 1024);
    input.close();
    // Despite the injected faults, the loop must have run exactly 3 times.
    assertEquals(3, input.getHedgedReadOpsLoopNumForTesting());
  } catch (BlockMissingException e) {
    // Was `assertTrue(false)`: fail() states the intent and carries the cause.
    fail("Unexpected BlockMissingException: " + e);
  } finally {
    Mockito.reset(injector);
    IOUtils.cleanup(null, input);
    IOUtils.cleanup(null, output);
    fileSys.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestQuota

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow: quota of 3 blocks is set on /test; a file of half a block (x3
// replicas) is created and the test asserts the summary reports exactly
// half the quota consumed, cross-checked against the WebHDFS view. Creating
// a second half-block file must then trip QuotaExceededException, because
// quota checking conservatively charges a full block per allocation.
// NOTE(review): the webhdfs FileSystem instance is never closed; harmless
// in a test JVM but worth a try-with-resources in a cleanup pass.
/** * Violate a space quota using files of size < 1 block. Test that block * allocation conservatively assumes that for quota checking the entire * space of the block is used. */ @Test public void testBlockAllocationAdjustsUsageConservatively() throws Exception { Configuration conf=new HdfsConfiguration(); final int BLOCK_SIZE=6 * 1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); DFSAdmin admin=new DFSAdmin(conf); final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf); try { Path dir=new Path("/test"); Path file1=new Path("/test/test1"); Path file2=new Path("/test/test2"); boolean exceededQuota=false; final int QUOTA_SIZE=3 * BLOCK_SIZE; final int FILE_SIZE=BLOCK_SIZE / 2; ContentSummary c; assertTrue(fs.mkdirs(dir)); runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString()); DFSTestUtil.createFile(fs,file1,FILE_SIZE,(short)3,1L); DFSTestUtil.waitReplication(fs,file1,(short)3); c=fs.getContentSummary(dir); checkContentSummary(c,webhdfs.getContentSummary(dir)); assertEquals("Quota is half consumed",QUOTA_SIZE / 2,c.getSpaceConsumed()); try { DFSTestUtil.createFile(fs,file2,FILE_SIZE,(short)3,1L); } catch ( QuotaExceededException e) { exceededQuota=true; } assertTrue("Quota not exceeded",exceededQuota); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow overview (exact statement order matters -- every assertEquals checks
// the cumulative space accounting after the preceding operation):
//  1. Nested quota dirs qdir1 (4x fileSpace), qdir20 (6x), qdir21 (2x).
//  2. create under qdir21, then an over-quota create must throw
//     DSQuotaExceededException; delete restores the count.
//  3. rename moves consumption from qdir21's subtree to qdir20's; a rename
//     back that would exceed quota must throw and leave dst intact.
//  4. append grows consumption; an append past quota throws mid-write.
//  5. setReplication down/up adjusts consumption by replication factor.
//  6. HDFS-2053 section: sibling dirs A/B/C with one quota'd dir verify
//     per-directory accounting, then getYieldCount()==20 checks that the
//     content-summary lock-yielding (limit=2 per conf) kicked in.
// NOTE(review): many asserts here use assertEquals(actual, expected) --
// reversed from JUnit convention; failure messages will read backwards.
// NOTE(review): some source lines below are wrapped mid-expression by the
// extraction (e.g. "new / Path(...)"); the code tokens are unchanged.
/** * Test HDFS operations that change disk space consumed by a directory tree. * namely create, rename, delete, append, and setReplication. * This is based on testNamespaceCommands() above. */ @Test public void testSpaceCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,"512"); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); final DistributedFileSystem dfs=(DistributedFileSystem)fs; try { int fileLen=1024; short replication=3; int fileSpace=fileLen * replication; assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30"))); final Path quotaDir1=new Path("/nqdir0/qdir1"); dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,4 * fileSpace); ContentSummary c=dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceQuota(),4 * fileSpace); final Path quotaDir20=new Path("/nqdir0/qdir1/qdir20"); dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,6 * fileSpace); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceQuota(),6 * fileSpace); final Path quotaDir21=new Path("/nqdir0/qdir1/qdir21"); assertTrue(dfs.mkdirs(quotaDir21)); dfs.setQuota(quotaDir21,HdfsConstants.QUOTA_DONT_SET,2 * fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceQuota(),2 * fileSpace); Path tempPath=new Path(quotaDir21,"nqdir32"); assertTrue(dfs.mkdirs(tempPath)); DFSTestUtil.createFile(dfs,new Path(tempPath,"fileDir/file1"),fileLen,replication,0); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),fileSpace); boolean hasException=false; try { DFSTestUtil.createFile(dfs,new Path(quotaDir21,"nqdir33/file2"),2 * fileLen,replication,0); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.delete(new 
Path(quotaDir21,"nqdir33"),true)); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),fileSpace); assertEquals(c.getSpaceQuota(),2 * fileSpace); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),0); Path dstPath=new Path(quotaDir20,"nqdir30"); Path srcPath=new Path(quotaDir21,"nqdir32"); assertTrue(dfs.rename(srcPath,dstPath)); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),fileSpace); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceConsumed(),fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),0); final Path file2=new Path(dstPath,"fileDir/file2"); int file2Len=2 * fileLen; DFSTestUtil.createFile(dfs,file2,file2Len,replication,0); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),3 * fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),0); hasException=false; try { assertFalse(dfs.rename(dstPath,srcPath)); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertFalse(dfs.exists(srcPath)); assertTrue(dfs.exists(dstPath)); c=dfs.getContentSummary(quotaDir20); assertEquals(c.getSpaceConsumed(),3 * fileSpace); c=dfs.getContentSummary(quotaDir21); assertEquals(c.getSpaceConsumed(),0); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getSpaceQuota(),4 * fileSpace); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),3 * fileSpace); OutputStream out=dfs.append(file2); out.write(new byte[fileLen]); out.close(); file2Len+=fileLen; c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),4 * fileSpace); dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,5 * fileSpace); out=dfs.append(file2); hasException=false; try { out.write(new byte[fileLen + 1024]); out.flush(); out.close(); } catch ( DSQuotaExceededException e) { hasException=true; IOUtils.closeStream(out); } assertTrue(hasException); file2Len+=fileLen; c=dfs.getContentSummary(dstPath); 
assertEquals(c.getSpaceConsumed(),5 * fileSpace); dfs.setReplication(file2,(short)(replication - 1)); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),5 * fileSpace - file2Len); hasException=false; try { dfs.setReplication(file2,(short)(replication + 1)); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),5 * fileSpace - file2Len); dfs.setQuota(quotaDir1,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace); dfs.setQuota(quotaDir20,HdfsConstants.QUOTA_DONT_SET,10 * fileSpace); dfs.setReplication(file2,(short)(replication + 1)); c=dfs.getContentSummary(dstPath); assertEquals(c.getSpaceConsumed(),5 * fileSpace + file2Len); final Path quotaDir2053=new Path("/hdfs-2053"); assertTrue(dfs.mkdirs(quotaDir2053)); final Path quotaDir2053_A=new Path(quotaDir2053,"A"); assertTrue(dfs.mkdirs(quotaDir2053_A)); final Path quotaDir2053_B=new Path(quotaDir2053,"B"); assertTrue(dfs.mkdirs(quotaDir2053_B)); final Path quotaDir2053_C=new Path(quotaDir2053,"C"); assertTrue(dfs.mkdirs(quotaDir2053_C)); int sizeFactorA=1; int sizeFactorB=2; int sizeFactorC=4; dfs.setQuota(quotaDir2053_C,HdfsConstants.QUOTA_DONT_SET,(sizeFactorC + 1) * fileSpace); c=dfs.getContentSummary(quotaDir2053_C); assertEquals(c.getSpaceQuota(),(sizeFactorC + 1) * fileSpace); DFSTestUtil.createFile(dfs,new Path(quotaDir2053_A,"fileA"),sizeFactorA * fileLen,replication,0); c=dfs.getContentSummary(quotaDir2053_A); assertEquals(c.getSpaceConsumed(),sizeFactorA * fileSpace); DFSTestUtil.createFile(dfs,new Path(quotaDir2053_B,"fileB"),sizeFactorB * fileLen,replication,0); c=dfs.getContentSummary(quotaDir2053_B); assertEquals(c.getSpaceConsumed(),sizeFactorB * fileSpace); DFSTestUtil.createFile(dfs,new Path(quotaDir2053_C,"fileC"),sizeFactorC * fileLen,replication,0); c=dfs.getContentSummary(quotaDir2053_C); assertEquals(c.getSpaceConsumed(),sizeFactorC * fileSpace); c=dfs.getContentSummary(quotaDir2053); 
assertEquals(c.getSpaceConsumed(),(sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace); assertEquals(20,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow: block size 6KB, quota = 32 blocks (192KB). 59 files of 1KB x3
// replicas are created (177KB consumed); the 60th must trip
// QuotaExceededException because each sub-block file is still charged a
// full block for quota purposes. WebHDFS ContentSummary is cross-checked,
// and getYieldCount()==2 verifies the content-summary lock yielding
// (limit=2 per conf) was exercised.
// NOTE(review): the assertion-message string literal below is wrapped
// across two lines by the extraction tool; in the real source it is a
// single literal. Code tokens are otherwise unchanged.
// NOTE(review): as in testSpaceCommands, webhdfs is never closed.
/** * Like the previous test but create many files. This covers bugs where * the quota adjustment is incorrect but it takes many files to accrue * a big enough accounting error to violate the quota. */ @Test public void testMultipleFilesSmallerThanOneBlock() throws Exception { Configuration conf=new HdfsConfiguration(); final int BLOCK_SIZE=6 * 1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,BLOCK_SIZE); conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,true); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FileSystem fs=cluster.getFileSystem(); DFSAdmin admin=new DFSAdmin(conf); final String nnAddr=conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); final String webhdfsuri=WebHdfsFileSystem.SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs=new Path(webhdfsuri).getFileSystem(conf); try { long nsQuota=FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem()); assertTrue("Default namespace quota expected as long max. 
But the value is :" + nsQuota,nsQuota == Long.MAX_VALUE); Path dir=new Path("/test"); boolean exceededQuota=false; ContentSummary c; final int FILE_SIZE=1024; final int QUOTA_SIZE=32 * (int)fs.getDefaultBlockSize(dir); assertEquals(6 * 1024,fs.getDefaultBlockSize(dir)); assertEquals(192 * 1024,QUOTA_SIZE); assertTrue(fs.mkdirs(dir)); runCommand(admin,false,"-setSpaceQuota",Integer.toString(QUOTA_SIZE),dir.toString()); for (int i=0; i < 59; i++) { Path file=new Path("/test/test" + i); DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L); DFSTestUtil.waitReplication(fs,file,(short)3); } c=fs.getContentSummary(dir); checkContentSummary(c,webhdfs.getContentSummary(dir)); assertEquals("Invalid space consumed",59 * FILE_SIZE * 3,c.getSpaceConsumed()); assertEquals("Invalid space consumed",QUOTA_SIZE - (59 * FILE_SIZE * 3),3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE)); try { Path file=new Path("/test/test59"); DFSTestUtil.createFile(fs,file,FILE_SIZE,(short)3,1L); DFSTestUtil.waitReplication(fs,file,(short)3); } catch ( QuotaExceededException e) { exceededQuota=true; } assertTrue("Quota not exceeded",exceededQuota); assertEquals(2,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow overview (each ContentSummary assert depends on the operations
// immediately preceding it -- do not reorder):
//  1. Namespace quotas: qdir1=6, qdir20=7, qdir21=2 entries.
//  2. mkdirs up to each quota, then one more must throw
//     NSQuotaExceededException and leave counts unchanged.
//  3. renames that would exceed the destination's namespace quota must
//     throw; renames within quota move directory counts between subtrees.
//  4. delete of a quota'd subdir releases its count.
//  5. getYieldCount()==14 verifies content-summary lock yielding
//     (DFS_CONTENT_SUMMARY_LIMIT=2) was exercised along the way.
// NOTE(review): asserts use assertEquals(actual, expected) -- reversed
// from JUnit convention; failure messages will read backwards.
/** * Test commands that change the size of the name space: * mkdirs, rename, and delete */ @Test public void testNamespaceCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final DistributedFileSystem dfs=cluster.getFileSystem(); try { assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30"))); final Path quotaDir1=new Path("/nqdir0/qdir1"); dfs.setQuota(quotaDir1,6,HdfsConstants.QUOTA_DONT_SET); ContentSummary c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),6); final Path quotaDir2=new Path("/nqdir0/qdir1/qdir20"); dfs.setQuota(quotaDir2,7,HdfsConstants.QUOTA_DONT_SET); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),7); final Path quotaDir3=new Path("/nqdir0/qdir1/qdir21"); assertTrue(dfs.mkdirs(quotaDir3)); dfs.setQuota(quotaDir3,2,HdfsConstants.QUOTA_DONT_SET); c=dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(),1); assertEquals(c.getQuota(),2); Path tempPath=new Path(quotaDir3,"nqdir32"); assertTrue(dfs.mkdirs(tempPath)); c=dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),2); tempPath=new Path(quotaDir3,"nqdir33"); boolean hasException=false; try { assertFalse(dfs.mkdirs(tempPath)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); c=dfs.getContentSummary(quotaDir3); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),2); tempPath=new Path(quotaDir2,"nqdir31"); assertTrue(dfs.mkdirs(tempPath)); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),6); assertEquals(c.getQuota(),6); tempPath=new Path(quotaDir2,"nqdir33"); hasException=false; 
try { assertFalse(dfs.mkdirs(tempPath)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); tempPath=new Path(quotaDir2,"nqdir30"); dfs.rename(new Path(quotaDir3,"nqdir32"),tempPath); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),4); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),6); assertEquals(c.getQuota(),6); hasException=false; try { assertFalse(dfs.rename(tempPath,quotaDir3)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.exists(tempPath)); assertFalse(dfs.exists(new Path(quotaDir3,"nqdir30"))); hasException=false; try { assertFalse(dfs.rename(tempPath,new Path(quotaDir3,"nqdir32"))); } catch ( QuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.exists(tempPath)); assertFalse(dfs.exists(new Path(quotaDir3,"nqdir32"))); assertTrue(dfs.rename(tempPath,new Path("/nqdir0"))); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),4); assertEquals(c.getQuota(),6); assertTrue(dfs.mkdirs(new Path("/nqdir0/nqdir30/nqdir33"))); hasException=false; try { assertFalse(dfs.rename(new Path("/nqdir0/nqdir30"),tempPath)); } catch ( NSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertTrue(dfs.rename(quotaDir3,quotaDir2)); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),4); assertEquals(c.getQuota(),6); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),7); tempPath=new Path(quotaDir2,"qdir21"); c=dfs.getContentSummary(tempPath); assertEquals(c.getDirectoryCount(),1); assertEquals(c.getQuota(),2); dfs.delete(tempPath,true); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),2); assertEquals(c.getQuota(),7); 
c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),3); assertEquals(c.getQuota(),6); assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"),quotaDir2)); c=dfs.getContentSummary(quotaDir2); assertEquals(c.getDirectoryCount(),5); assertEquals(c.getQuota(),7); c=dfs.getContentSummary(quotaDir1); assertEquals(c.getDirectoryCount(),6); assertEquals(c.getQuota(),6); assertEquals(14,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Flow overview (runCommand's boolean argument is "expect failure"; each
// CLI invocation and assert depends on the state left by the previous one):
//  1. -setQuota / -setSpaceQuota on /test, including the "2t" suffix parse
//     check (2L << 40 bytes), then verify ContentSummary fields.
//  2. Exceed namespace quota via mkdirs/create; -clrQuota lifts only the
//     namespace quota (space quota unchanged), then a write past the space
//     quota must fail mid-stream; -clrSpaceQuota lifts it.
//  3. Error cases: quota commands on nonexistent paths, on plain files,
//     zero/negative/overflowing/garbage quota values all expected to fail.
//  4. Permission case: a non-superuser ("userxx") must be rejected for all
//     quota commands (run via UserGroupInformation.doAs).
//  5. Root ("/") quota set/clear semantics, then per-directory and root
//     space quotas enforced against new file creation.
//  6. getYieldCount()==4 verifies content-summary lock yielding ran.
// NOTE(review): 'args' elements are mutated and reused across sections
// (args[0]/args[1]/args[2] reassigned in place) -- keep that in mind when
// reading the later runCommand calls.
/** * Test quota related commands: * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count */ @Test public void testQuotaCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); final int DEFAULT_BLOCK_SIZE=512; conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DEFAULT_BLOCK_SIZE); conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,2); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); final FileSystem fs=cluster.getFileSystem(); assertTrue("Not a HDFS: " + fs.getUri(),fs instanceof DistributedFileSystem); final DistributedFileSystem dfs=(DistributedFileSystem)fs; DFSAdmin admin=new DFSAdmin(conf); try { final int fileLen=1024; final short replication=5; final long spaceQuota=fileLen * replication * 15 / 8; final Path parent=new Path("/test"); assertTrue(dfs.mkdirs(parent)); String[] args=new String[]{"-setQuota","3",parent.toString()}; runCommand(admin,args,false); runCommand(admin,false,"-setSpaceQuota","2t",parent.toString()); assertEquals(2L << 40,dfs.getContentSummary(parent).getSpaceQuota()); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota),parent.toString()); final Path childDir0=new Path(parent,"data0"); assertTrue(dfs.mkdirs(childDir0)); final Path childFile0=new Path(parent,"datafile0"); DFSTestUtil.createFile(fs,childFile0,fileLen,replication,0); ContentSummary c=dfs.getContentSummary(parent); assertEquals(c.getFileCount() + c.getDirectoryCount(),3); assertEquals(c.getQuota(),3); assertEquals(c.getSpaceConsumed(),fileLen * replication); assertEquals(c.getSpaceQuota(),spaceQuota); c=dfs.getContentSummary(childDir0); assertEquals(c.getFileCount() + c.getDirectoryCount(),1); assertEquals(c.getQuota(),-1); c=dfs.getContentSummary(parent); assertEquals(c.getSpaceConsumed(),fileLen * replication); final Path childDir1=new Path(parent,"data1"); boolean hasException=false; try { assertFalse(dfs.mkdirs(childDir1)); } catch ( QuotaExceededException e) { hasException=true; } 
assertTrue(hasException); OutputStream fout; final Path childFile1=new Path(parent,"datafile1"); hasException=false; try { fout=dfs.create(childFile1); } catch ( QuotaExceededException e) { hasException=true; } assertTrue(hasException); runCommand(admin,new String[]{"-clrQuota",parent.toString()},false); c=dfs.getContentSummary(parent); assertEquals(c.getQuota(),-1); assertEquals(c.getSpaceQuota(),spaceQuota); runCommand(admin,new String[]{"-clrQuota",childDir0.toString()},false); c=dfs.getContentSummary(childDir0); assertEquals(c.getQuota(),-1); fout=dfs.create(childFile1,replication); try { fout.write(new byte[fileLen]); fout.close(); Assert.fail(); } catch ( QuotaExceededException e) { IOUtils.closeStream(fout); } dfs.delete(childFile1,false); runCommand(admin,false,"-clrSpaceQuota",parent.toString()); c=dfs.getContentSummary(parent); assertEquals(c.getQuota(),-1); assertEquals(c.getSpaceQuota(),-1); DFSTestUtil.createFile(dfs,childFile1,fileLen,replication,0); args=new String[]{"-setQuota","1",parent.toString()}; runCommand(admin,args,false); runCommand(admin,false,"-setSpaceQuota",Integer.toString(fileLen),args[2]); args=new String[]{"-setQuota","1",childDir0.toString()}; runCommand(admin,args,false); hasException=false; try { assertFalse(dfs.mkdirs(new Path(childDir0,"in"))); } catch ( QuotaExceededException e) { hasException=true; } assertTrue(hasException); c=dfs.getContentSummary(childDir0); assertEquals(c.getDirectoryCount() + c.getFileCount(),1); assertEquals(c.getQuota(),1); Path nonExistentPath=new Path("/test1"); assertFalse(dfs.exists(nonExistentPath)); args=new String[]{"-setQuota","1",nonExistentPath.toString()}; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota","1g",nonExistentPath.toString()); assertTrue(dfs.isFile(childFile0)); args[1]=childFile0.toString(); runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota","1t",args[1]); args[0]="-clrQuota"; runCommand(admin,args,true); 
runCommand(admin,true,"-clrSpaceQuota",args[1]); args[1]=nonExistentPath.toString(); runCommand(admin,args,true); runCommand(admin,true,"-clrSpaceQuota",args[1]); args=new String[]{"-setQuota","0",parent.toString()}; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota","0",args[2]); args[1]="-1"; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota",args[1],args[2]); args[1]=String.valueOf(Long.MAX_VALUE + 1L); runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota",args[1],args[2]); args[1]="33aa1.5"; runCommand(admin,args,true); runCommand(admin,true,"-setSpaceQuota",args[1],args[2]); runCommand(admin,true,"-setSpaceQuota",(Long.MAX_VALUE / 1024 / 1024 + 1024) + "m",args[2]); final String username="userxx"; UserGroupInformation ugi=UserGroupInformation.createUserForTesting(username,new String[]{"groupyy"}); final String[] args2=args.clone(); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { assertEquals("Not running as new user",username,UserGroupInformation.getCurrentUser().getShortUserName()); DFSAdmin userAdmin=new DFSAdmin(conf); args2[1]="100"; runCommand(userAdmin,args2,true); runCommand(userAdmin,true,"-setSpaceQuota","1g",args2[2]); String[] args3=new String[]{"-clrQuota",parent.toString()}; runCommand(userAdmin,args3,true); runCommand(userAdmin,true,"-clrSpaceQuota",args3[1]); return null; } } ); runCommand(admin,true,"-clrQuota","/"); runCommand(admin,false,"-setQuota","1000000","/"); runCommand(admin,true,"-clrQuota","/"); runCommand(admin,false,"-clrSpaceQuota","/"); runCommand(admin,new String[]{"-clrQuota",parent.toString()},false); runCommand(admin,false,"-clrSpaceQuota",parent.toString()); final Path childDir2=new Path(parent,"data2"); assertTrue(dfs.mkdirs(childDir2)); final Path childFile2=new Path(childDir2,"datafile2"); final Path childFile3=new Path(childDir2,"datafile3"); final long spaceQuota2=DEFAULT_BLOCK_SIZE * replication; final long 
fileLen2=DEFAULT_BLOCK_SIZE; runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString()); runCommand(admin,false,"-clrSpaceQuota",childDir2.toString()); DFSTestUtil.createFile(fs,childFile2,fileLen2,replication,0); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),childDir2.toString()); hasException=false; try { DFSTestUtil.createFile(fs,childFile3,fileLen2,replication,0); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); final Path childFile4=new Path("/","datafile2"); final Path childFile5=new Path("/","datafile3"); runCommand(admin,true,"-clrQuota","/"); runCommand(admin,false,"-clrSpaceQuota","/"); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/"); runCommand(admin,false,"-clrSpaceQuota","/"); DFSTestUtil.createFile(fs,childFile4,fileLen2,replication,0); runCommand(admin,false,"-setSpaceQuota",Long.toString(spaceQuota2),"/"); hasException=false; try { DFSTestUtil.createFile(fs,childFile5,fileLen2,replication,0); } catch ( DSQuotaExceededException e) { hasException=true; } assertTrue(hasException); assertEquals(4,cluster.getNamesystem().getFSDirectory().getYieldCount()); } finally { cluster.shutdown(); } }

EqualityVerifier 
/**
 * Tests to make sure we're getting human readable Quota exception messages
 * Test for @link{ NSQuotaExceededException, DSQuotaExceededException}
 * @throws Exception
 */
@Test
public void testDSQuotaExceededExceptionIsHumanReadable() throws Exception {
  // Use a primitive long instead of the original boxed Integer: the value
  // is only passed to the DSQuotaExceededException(long, long) constructor,
  // so the Integer introduced accidental autoboxing with no benefit.
  final long bytes = 1024;
  try {
    throw new DSQuotaExceededException(bytes, bytes);
  } catch (DSQuotaExceededException e) {
    // 1024 bytes must be rendered in both raw bytes and the human-readable
    // "1 KB" form, for quota and consumed values alike.
    assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
        + " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
  }
}

Class: org.apache.hadoop.hdfs.TestReplaceDatanodeOnFailure

IterativeVerifier InternalCallVerifier EqualityVerifier 
// Exhaustively enumerates (replication 1..5) x (0..5 existing datanodes) x
// (isAppend) x (isHflushed) and checks ReplaceDatanodeOnFailure.DEFAULT's
// satisfy() against the expected truth table computed inline:
//   - never replace when there are enough replicas, no replicas at all,
//     or replication < 3;
//   - otherwise replace when appending/hflushed, or when the number of
//     existing datanodes has dropped to <= replication/2.
// On mismatch the AssertionError is rethrown wrapped with the full
// parameter combination so the failing case is identifiable.
/** * Test DEFAULT ReplaceDatanodeOnFailure policy. */ @Test public void testDefaultPolicy() throws Exception { final ReplaceDatanodeOnFailure p=ReplaceDatanodeOnFailure.DEFAULT; final DatanodeInfo[] infos=new DatanodeInfo[5]; final DatanodeInfo[][] datanodes=new DatanodeInfo[infos.length + 1][]; datanodes[0]=new DatanodeInfo[0]; for (int i=0; i < infos.length; ) { infos[i]=DFSTestUtil.getLocalDatanodeInfo(50020 + i); i++; datanodes[i]=new DatanodeInfo[i]; System.arraycopy(infos,0,datanodes[i],0,datanodes[i].length); } final boolean[] isAppend={true,true,false,false}; final boolean[] isHflushed={true,false,true,false}; for (short replication=1; replication <= infos.length; replication++) { for (int nExistings=0; nExistings < datanodes.length; nExistings++) { final DatanodeInfo[] existings=datanodes[nExistings]; Assert.assertEquals(nExistings,existings.length); for (int i=0; i < isAppend.length; i++) { for (int j=0; j < isHflushed.length; j++) { final int half=replication / 2; final boolean enoughReplica=replication <= nExistings; final boolean noReplica=nExistings == 0; final boolean replicationL3=replication < 3; final boolean existingsLEhalf=nExistings <= half; final boolean isAH=isAppend[i] || isHflushed[j]; final boolean expected; if (enoughReplica || noReplica || replicationL3) { expected=false; } else { expected=isAH || existingsLEhalf; } final boolean computed=p.satisfy(replication,existings,isAppend[i],isHflushed[j]); try { Assert.assertEquals(expected,computed); } catch ( AssertionError e) { final String s="replication=" + replication + "\nnExistings ="+ nExistings+ "\nisAppend ="+ isAppend[i]+ "\nisHflushed ="+ isHflushed[j]; throw new RuntimeException(s,e); } } } } } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Runs a 1-datanode cluster with a replication-3 file. The first append to
// the fresh empty file succeeds; the second append is expected to throw
// IOException -- presumably because the DEFAULT replace-datanode policy now
// demands replacement datanodes that the single-node cluster cannot
// provide (TODO confirm against ReplaceDatanodeOnFailure semantics).
// NOTE(review): 'cluster' is non-null by the time the finally block runs
// (assigned before the try), so the null check there is defensive only.
@Test public void testAppend() throws Exception { final Configuration conf=new HdfsConfiguration(); final short REPLICATION=(short)3; Assert.assertEquals(ReplaceDatanodeOnFailure.DEFAULT,ReplaceDatanodeOnFailure.get(conf)); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { final DistributedFileSystem fs=cluster.getFileSystem(); final Path f=new Path(DIR,"testAppend"); { LOG.info("create an empty file " + f); fs.create(f,REPLICATION).close(); final FileStatus status=fs.getFileStatus(f); Assert.assertEquals(REPLICATION,status.getReplication()); Assert.assertEquals(0L,status.getLen()); } final byte[] bytes=new byte[1000]; { LOG.info("append " + bytes.length + " bytes to "+ f); final FSDataOutputStream out=fs.append(f); out.write(bytes); out.close(); final FileStatus status=fs.getFileStatus(f); Assert.assertEquals(REPLICATION,status.getReplication()); Assert.assertEquals(bytes.length,status.getLen()); } { LOG.info("append another " + bytes.length + " bytes to "+ f); try { final FSDataOutputStream out=fs.append(f); out.write(bytes); out.close(); Assert.fail(); } catch ( IOException ioe) { LOG.info("This exception is expected",ioe); } } } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// Flow: with the ALWAYS replace policy, start 10 SlowWriters on a
// REPLICATION-node cluster (all on RACK0), then add 2 datanodes on RACK1
// and kill one original datanode. After a grace period every writer must
// have found replacement datanodes (checkReplication), and each file's
// contents are verified to be the sequence 0,1,2,... written by SlowWriter.
// NOTE(review): the fixed sleepSeconds(1)/sleepSeconds(5) waits make this
// test timing-sensitive on slow hosts.
/** * Test replace datanode on failure. */ @Test public void testReplaceDatanodeOnFailure() throws Exception { final Configuration conf=new HdfsConfiguration(); ReplaceDatanodeOnFailure.ALWAYS.write(conf); final String[] racks=new String[REPLICATION]; Arrays.fill(racks,RACK0); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).racks(racks).numDataNodes(REPLICATION).build(); try { final DistributedFileSystem fs=cluster.getFileSystem(); final Path dir=new Path(DIR); final SlowWriter[] slowwriters=new SlowWriter[10]; for (int i=1; i <= slowwriters.length; i++) { slowwriters[i - 1]=new SlowWriter(fs,new Path(dir,"file" + i),i * 200L); } for ( SlowWriter s : slowwriters) { s.start(); } sleepSeconds(1); cluster.startDataNodes(conf,2,true,null,new String[]{RACK1,RACK1}); cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION)); sleepSeconds(5); for ( SlowWriter s : slowwriters) { s.checkReplication(); s.interruptRunning(); } for ( SlowWriter s : slowwriters) { s.joinAndClose(); } LOG.info("Verify the file"); for (int i=0; i < slowwriters.length; i++) { LOG.info(slowwriters[i].filepath + ": length=" + fs.getFileStatus(slowwriters[i].filepath).getLen()); FSDataInputStream in=null; try { in=fs.open(slowwriters[i].filepath); for (int j=0, x; (x=in.read()) != -1; j++) { Assert.assertEquals(j,x); } } finally { IOUtils.closeStream(in); } } } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.TestReplication

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Pending-replication retry after replica loss and corruption.  Writes one
 * block with replication 4, shuts the cluster down, then on the first three
 * datanode storage dirs deletes one replica's block file and overwrites 25
 * bytes in the other two (exactly 3 replica files must be found).  Restarts
 * with twice the datanodes, a 2s pending-replication timeout and a 0.75
 * safemode threshold, and waits until the namenode restores 4 good replicas.
 */
@Test public void testPendingReplicationRetry() throws IOException { MiniDFSCluster cluster=null; int numDataNodes=4; String testFile="/replication-test-file"; Path testPath=new Path(testFile); byte buffer[]=new byte[1024]; for (int i=0; i < buffer.length; i++) { buffer[i]='1'; } try { Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes)); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); DFSClient dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf); OutputStream out=cluster.getFileSystem().create(testPath); out.write(buffer); out.close(); waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1); ExtendedBlock block=dfsClient.getNamenode().getBlockLocations(testFile,0,Long.MAX_VALUE).get(0).getBlock(); cluster.shutdown(); cluster=null; for (int i=0; i < 25; i++) { buffer[i]='0'; } int fileCount=0; for (int dnIndex=0; dnIndex < 3; dnIndex++) { File blockFile=MiniDFSCluster.getBlockFile(dnIndex,block); LOG.info("Checking for file " + blockFile); if (blockFile != null && blockFile.exists()) { if (fileCount == 0) { LOG.info("Deleting file " + blockFile); assertTrue(blockFile.delete()); } else { LOG.info("Corrupting file " + blockFile); long len=blockFile.length(); assertTrue(len > 50); RandomAccessFile blockOut=new RandomAccessFile(blockFile,"rw"); try { blockOut.seek(len / 3); blockOut.write(buffer,0,25); } finally { blockOut.close(); } } fileCount++; } } assertEquals(3,fileCount); LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs"); conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_REPLICATION_KEY,Integer.toString(numDataNodes)); conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,Integer.toString(2)); conf.set("dfs.datanode.block.write.timeout.sec",Integer.toString(5)); conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.75f"); 
cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes * 2).format(false).build(); cluster.waitActive(); dfsClient=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf); waitForBlockReplication(testFile,dfsClient.getNamenode(),numDataNodes,-1); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Corrupts every replica of a block (replication 1), raises the replication
 * factor to 2 to trigger a transfer, and polls the namenode until the block
 * is marked corrupt.  Finally verifies only the single (corrupt-source)
 * replica location is reported, i.e. the bad transfer was not accepted.
 */
@Test public void testBadBlockReportOnTransfer() throws Exception {
  Configuration conf = new HdfsConfiguration();
  FileSystem fs = null;
  DFSClient dfsClient = null;
  LocatedBlocks blocks = null;
  int replicaCount = 0;
  short replFactor = 1;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
  // Create a single-replica file and wait for it to reach its replication.
  Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
  DFSTestUtil.createFile(fs, file1, 1024, replFactor, 0);
  DFSTestUtil.waitReplication(fs, file1, replFactor);
  // Corrupt the only replica of the file's first block on disk.
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
  int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
  assertEquals("Corrupted too few blocks", replFactor, blockFilesCorrupted);
  // Raising the replication factor forces a block transfer; the receiving
  // datanode should fail the checksum and report the block as corrupt.
  replFactor = 2;
  fs.setReplication(file1, replFactor);
  blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  // FIX: idiomatic negation instead of "isCorrupt() != true".
  while (!blocks.get(0).isCorrupt()) {
    try {
      LOG.info("Waiting until block is marked as corrupt...");
      Thread.sleep(1000);
    } catch (InterruptedException ie) {
      // ignored: keep polling until the block is flagged corrupt
    }
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  }
  replicaCount = blocks.get(0).getLocations().length;
  // FIX: assertEquals reports expected/actual on failure, unlike
  // the original assertTrue(replicaCount == 1).
  assertEquals(1, replicaCount);
  cluster.shutdown();
}

Class: org.apache.hadoop.hdfs.TestReservedRawPaths

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * /.reserved must be invisible: listing /.reserved or /.reserved/.inodes
 * throws FileNotFoundException, while listing /.reserved/raw exposes the raw
 * namespace containing exactly the one file created by the test.
 */
@Test(timeout=120000) public void testListDotReserved() throws Exception {
  // Create a file in the raw namespace.
  final Path baseFileRaw = new Path("/.reserved/raw/base");
  final int len = 8192;
  DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
  // Listing /.reserved itself must fail.
  try {
    fs.listStatus(new Path("/.reserved"));
    fail("expected FNFE");
  } catch (FileNotFoundException e) {
    assertExceptionContains("/.reserved does not exist", e);
  }
  // Listing /.reserved/.inodes must fail as well.
  try {
    fs.listStatus(new Path("/.reserved/.inodes"));
    fail("expected FNFE");
  } catch (FileNotFoundException e) {
    assertExceptionContains("/.reserved/.inodes does not exist", e);
  }
  // /.reserved/raw is listable and shows the file created above.
  final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
  // BUG FIX: expected and actual were swapped (assertEquals(msg, expected,
  // actual)); the swap garbles the failure message when the count is wrong.
  assertEquals("expected 1 entry", 1, fileStatuses.length);
  assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
}

EqualityVerifier 
/**
 * Builds a three-level directory chain under / and walks the same chain
 * through the /.reserved/raw prefix, asserting every visited path carries the
 * raw prefix and that exactly three levels are traversed.
 */
@Test(timeout=120000) public void testListRecursive() throws Exception {
  // Create /dir0/dir1/dir2.
  Path dir = new Path("/");
  for (int level = 0; level < 3; level++) {
    dir = new Path(dir, "dir" + level);
    fs.mkdirs(dir);
  }
  // Descend through the raw namespace, counting the levels visited.
  int visited = 0;
  Path cursor = new Path("/.reserved/raw");
  for (FileStatus[] entries = fs.listStatus(cursor); entries != null && entries.length > 0; entries = fs.listStatus(cursor)) {
    FileStatus head = entries[0];
    assertMatches(head.getPath().toString(), "/.reserved/raw");
    cursor = Path.getPathWithoutSchemeAndAuthority(head.getPath());
    visited++;
  }
  assertEquals(3, visited);
}

Class: org.apache.hadoop.hdfs.TestRollingUpgrade

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rolling upgrade backed by a MiniJournalCluster (QJM).  Formats one name dir,
 * clones it to a second, runs PREPARE on cluster 1 (creating /foo before and
 * /bar after the prepare), then starts cluster 2 from the cloned dir and
 * checks the rolling-upgrade QUERY info survives restarts, that a
 * "-upgrade" restart during a rolling upgrade is rejected (IOException
 * expected), that FINALIZE preserves the prepare start time, and that all of
 * /foo, /bar, /baz survive a final REGULAR restart.
 */
@Test(timeout=30000) public void testRollingUpgradeWithQJM() throws Exception { String nnDirPrefix=MiniDFSCluster.getBaseDirectory() + "/nn/"; final File nn1Dir=new File(nnDirPrefix + "image1"); final File nn2Dir=new File(nnDirPrefix + "image2"); LOG.info("nn1Dir=" + nn1Dir); LOG.info("nn2Dir=" + nn2Dir); final Configuration conf=new HdfsConfiguration(); final MiniJournalCluster mjc=new MiniJournalCluster.Builder(conf).build(); setConf(conf,nn1Dir,mjc); { final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).checkExitOnShutdown(false).build(); cluster.shutdown(); } MiniDFSCluster cluster2=null; try { FileUtil.fullyDelete(nn2Dir); FileUtil.copy(nn1Dir,FileSystem.getLocal(conf).getRaw(),new Path(nn2Dir.getAbsolutePath()),false,conf); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).manageNameDfsDirs(false).checkExitOnShutdown(false).build(); final Path foo=new Path("/foo"); final Path bar=new Path("/bar"); final Path baz=new Path("/baz"); final RollingUpgradeInfo info1; { final DistributedFileSystem dfs=cluster.getFileSystem(); dfs.mkdirs(foo); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); info1=dfs.rollingUpgrade(RollingUpgradeAction.PREPARE); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); LOG.info("START\n" + info1); Assert.assertEquals(info1,dfs.rollingUpgrade(RollingUpgradeAction.QUERY)); dfs.mkdirs(bar); cluster.shutdown(); } final Configuration conf2=setConf(new Configuration(),nn2Dir,mjc); cluster2=new MiniDFSCluster.Builder(conf2).numDataNodes(0).format(false).manageNameDfsDirs(false).build(); final DistributedFileSystem dfs2=cluster2.getFileSystem(); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertFalse(dfs2.exists(baz)); Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); dfs2.mkdirs(baz); LOG.info("RESTART cluster 2"); cluster2.restartNameNode(); 
Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertTrue(dfs2.exists(baz)); try { cluster2.restartNameNode("-upgrade"); } catch ( IOException e) { LOG.info("The exception is expected.",e); } LOG.info("RESTART cluster 2 again"); cluster2.restartNameNode(); Assert.assertEquals(info1,dfs2.rollingUpgrade(RollingUpgradeAction.QUERY)); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertTrue(dfs2.exists(baz)); final RollingUpgradeInfo finalize=dfs2.rollingUpgrade(RollingUpgradeAction.FINALIZE); LOG.info("FINALIZE: " + finalize); Assert.assertEquals(info1.getStartTime(),finalize.getStartTime()); LOG.info("RESTART cluster 2 with regular startup option"); cluster2.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR); cluster2.restartNameNode(); Assert.assertTrue(dfs2.exists(foo)); Assert.assertTrue(dfs2.exists(bar)); Assert.assertTrue(dfs2.exists(baz)); } finally { if (cluster2 != null) cluster2.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * DFSAdmin datanode upgrade-control commands: "-getDatanodeInfo" succeeds
 * (exit 0) against a live datanode, "-shutdownDatanode ... upgrade" succeeds,
 * and after a 2-second grace period the datanode is down and
 * "-getDatanodeInfo" returns -1.
 * NOTE(review): the fixed Thread.sleep(2000) assumes the datanode exits
 * within 2s — potential flakiness on slow hosts.
 */
@Test public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception { final Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); final DFSAdmin dfsadmin=new DFSAdmin(conf); DataNode dn=cluster.getDataNodes().get(0); final String dnAddr=dn.getDatanodeId().getIpcAddr(false); final String[] args1={"-getDatanodeInfo",dnAddr}; Assert.assertEquals(0,dfsadmin.run(args1)); final String[] args2={"-shutdownDatanode",dnAddr,"upgrade"}; Assert.assertEquals(0,dfsadmin.run(args2)); Thread.sleep(2000); Assert.assertFalse("DataNode should exit",dn.isDatanodeUp()); Assert.assertEquals(-1,dfsadmin.run(args1)); } finally { if (cluster != null) cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestRollingUpgradeRollback

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * "-rollingUpgrade rollback" startup option: prepares a rolling upgrade
 * (creating /foo before PREPARE and /bar after), verifies NNStorage after the
 * prepare, then starts a fresh NameNode with the rollback option and checks
 * that /foo is restored, /bar is gone, and NNStorage matches the rollback
 * state via checkNNStorage(storage, 3, 7).
 */
@Test public void testRollbackCommand() throws Exception { final Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; final Path foo=new Path("/foo"); final Path bar=new Path("/bar"); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); final DistributedFileSystem dfs=cluster.getFileSystem(); final DFSAdmin dfsadmin=new DFSAdmin(conf); dfs.mkdirs(foo); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); Assert.assertEquals(0,dfsadmin.run(new String[]{"-rollingUpgrade","prepare"})); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); dfs.mkdirs(bar); NNStorage storage=cluster.getNamesystem().getFSImage().getStorage(); checkNNStorage(storage,3,-1); } finally { if (cluster != null) { cluster.shutdown(); } } NameNode nn=null; try { nn=NameNode.createNameNode(new String[]{"-rollingUpgrade","rollback"},conf); INode fooNode=nn.getNamesystem().getFSDirectory().getINode4Write(foo.toString()); Assert.assertNotNull(fooNode); INode barNode=nn.getNamesystem().getFSDirectory().getINode4Write(bar.toString()); Assert.assertNull(barNode); NNStorage storage=nn.getNamesystem().getFSImage().getStorage(); checkNNStorage(storage,3,7); } finally { if (nn != null) { nn.stop(); nn.join(); } } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Rolling-upgrade rollback with quorum journal nodes: after restarting with
 * "-rollingUpgrade rollback", the pre-prepare directory /foo survives, the
 * post-prepare directory /bar is discarded, and every journal node's current
 * storage directory passes checkJNStorage.
 */
@Test public void testRollbackWithQJM() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniJournalCluster mjc = null;
  MiniDFSCluster cluster = null;
  final Path foo = new Path("/foo");
  final Path bar = new Path("/bar");
  try {
    mjc = new MiniJournalCluster.Builder(conf).numJournalNodes(NUM_JOURNAL_NODES).build();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI(JOURNAL_ID).toString());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    dfs.mkdirs(foo);
    // Prepare the rolling upgrade while in safe mode.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    Assert.assertEquals(0, dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"}));
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    // /bar is created after PREPARE; the rollback must drop it.
    dfs.mkdirs(bar);
    dfs.close();
    cluster.restartNameNode("-rollingUpgrade", "rollback");
    // Make sure /foo is still there, but /bar is not.
    dfs = cluster.getFileSystem();
    Assert.assertTrue(dfs.exists(foo));
    Assert.assertFalse(dfs.exists(bar));
    // Check the storage of every journal node.
    for (int i = 0; i < NUM_JOURNAL_NODES; i++) {
      // BUG FIX: the loop index was ignored and node 0 was checked
      // NUM_JOURNAL_NODES times; check each journal node instead.
      File dir = mjc.getCurrentDir(i, JOURNAL_ID);
      checkJNStorage(dir, 4, 7);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    if (mjc != null) {
      mjc.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.TestSafeMode

InternalCallVerifier EqualityVerifier 
/**
 * With zero blocks in the filesystem, the NameNode must skip the safemode
 * extension period entirely even when the extension is configured to be
 * very long (an empty safemode status string means safemode is off).
 */
@Test(timeout=45000) public void testNoExtensionIfNoBlocks() throws IOException {
  // Configure a long extension window; it must not be honored for 0 blocks.
  cluster.getConfiguration(0).setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 60000);
  cluster.restartNameNode();
  final String safemodeStatus = cluster.getNameNode().getNamesystem().getSafemode();
  assertEquals("", safemodeStatus);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the NN initializes its under-replicated blocks queue before it
 * is ready to exit safemode (HDFS-1476).  Creates a 15-block file, stops all
 * three datanodes, restarts the NN with a repl-queue threshold of 1/15, and
 * checks safemode status plus that the repl queues are NOT yet initialized.
 * Restarting one datanode then makes some (but not all) blocks safe, the
 * queues become initialized, and the test polls until exactly (15 - safe)
 * blocks are reported under-replicated.
 */
@Test(timeout=45000) public void testInitializeReplQueuesEarly() throws Exception { LOG.info("Starting testInitializeReplQueuesEarly"); BlockManagerTestUtil.setWritingPrefersLocalNode(cluster.getNamesystem().getBlockManager(),false); cluster.startDataNodes(conf,2,true,StartupOption.REGULAR,null); cluster.waitActive(); LOG.info("Creating files"); DFSTestUtil.createFile(fs,TEST_PATH,15 * BLOCK_SIZE,(short)1,1L); LOG.info("Stopping all DataNodes"); List dnprops=Lists.newLinkedList(); dnprops.add(cluster.stopDataNode(0)); dnprops.add(cluster.stopDataNode(0)); dnprops.add(cluster.stopDataNode(0)); cluster.getConfiguration(0).setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,1f / 15f); LOG.info("Restarting NameNode"); cluster.restartNameNode(); final NameNode nn=cluster.getNameNode(); String status=nn.getNamesystem().getSafemode(); assertEquals("Safe mode is ON. The reported blocks 0 needs additional " + "15 blocks to reach the threshold 0.9990 of total blocks 15.\n" + "The number of live datanodes 0 has reached the minimum number 0. 
"+ "Safe mode will be turned off automatically once the thresholds "+ "have been reached.",status); assertFalse("Mis-replicated block queues should not be initialized " + "until threshold is crossed",NameNodeAdapter.safeModeInitializedReplQueues(nn)); LOG.info("Restarting one DataNode"); cluster.restartDataNode(dnprops.remove(0)); GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ return getLongCounter("StorageBlockReportOps",getMetrics(NN_METRICS)) == cluster.getStoragesPerDatanode(); } } ,10,10000); final int safe=NameNodeAdapter.getSafeModeSafeBlocks(nn); assertTrue("Expected first block report to make some blocks safe.",safe > 0); assertTrue("Did not expect first block report to make all blocks safe.",safe < 15); assertTrue(NameNodeAdapter.safeModeInitializedReplQueues(nn)); BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager()); long underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks(); while (underReplicatedBlocks != (15 - safe)) { LOG.info("UnderReplicatedBlocks expected=" + (15 - safe) + ", actual="+ underReplicatedBlocks); Thread.sleep(100); BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager()); underReplicatedBlocks=nn.getNamesystem().getUnderReplicatedBlocks(); } cluster.restartDataNodes(); }

InternalCallVerifier EqualityVerifier 
/**
 * Tests that, when under-replicated blocks are processed at the end of
 * safe mode, blocks currently under construction (RBW) are not counted as
 * under-replicated, corrupt, or missing.  Regression test for HDFS-2822.
 */
@Test public void testRbwBlocksNotConsideredUnderReplicated() throws IOException {
  // FIX: parameterize the list; the enhanced-for over the elements below
  // requires List<FSDataOutputStream>, not the raw type.
  List<FSDataOutputStream> stms = Lists.newArrayList();
  try {
    // Create some junk blocks so the NN does not instantly leave safemode.
    DFSTestUtil.createFile(fs, new Path("/junk-blocks"), BLOCK_SIZE * 4, (short) 1, 1L);
    // Leave ten files each with one hflushed, still-open (RBW) block.
    for (int i = 0; i < 10; i++) {
      FSDataOutputStream stm = fs.create(new Path("/append-" + i), true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
      stms.add(stm);
      stm.write(1);
      stm.hflush();
    }
    // Restart the NN while the writers are still open.
    cluster.restartNameNode();
    FSNamesystem ns = cluster.getNameNode(0).getNamesystem();
    BlockManagerTestUtil.updateState(ns.getBlockManager());
    // RBW blocks must not appear in any problem-block queue.
    assertEquals(0, ns.getPendingReplicationBlocks());
    assertEquals(0, ns.getCorruptReplicaBlocks());
    assertEquals(0, ns.getMissingBlocksCount());
  } finally {
    for (FSDataOutputStream stm : stms) {
      IOUtils.closeStream(stm);
    }
    cluster.shutdown();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the NameNode stays in safemode while the configured minimum
 * number of live datanodes (1) exceeds the actual count (0): the safemode tip
 * message must mention the missing datanode, and after one datanode is
 * started (plus a 1s settle) the safemode status string becomes empty.
 */
@Test public void testDatanodeThreshold() throws IOException { cluster.shutdown(); Configuration conf=cluster.getConfiguration(0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,0); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,1); cluster.restartNameNode(); fs=cluster.getFileSystem(); String tipMsg=cluster.getNamesystem().getSafemode(); assertTrue("Safemode tip message doesn't look right: " + tipMsg,tipMsg.contains("The number of live datanodes 0 needs an additional " + "1 live datanodes to reach the minimum number 1.\n" + "Safe mode will be turned off automatically")); cluster.startDataNodes(conf,1,true,null,null); try { Thread.sleep(1000); } catch ( InterruptedException ignored) { } assertEquals("",cluster.getNamesystem().getSafemode()); }

Class: org.apache.hadoop.hdfs.TestSeekBug

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Seeking to a negative offset on an FSDataInputStream must raise an
 * IOException (declared via the expected attribute); a legal forward seek
 * beforehand confirms the stream itself works.
 */
@Test(expected=IOException.class) public void testNegativeSeek() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path boundariesFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs, boundariesFile, ONEMB, ONEMB, fs.getDefaultBlockSize(boundariesFile), fs.getDefaultReplication(boundariesFile), seed);
    FSDataInputStream in = fs.open(boundariesFile);
    // Sanity check: a valid seek updates the position.
    in.seek(65536);
    assertEquals(65536, in.getPos());
    // Negative offset — must throw.
    in.seek(-73);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}

InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
/**
 * Seeking beyond the end of the file on an FSDataInputStream must raise an
 * IOException (declared via the expected attribute); a legal forward seek
 * beforehand confirms the stream itself works.
 */
@Test(expected=IOException.class) public void testSeekPastFileSize() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path boundariesFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(fs, boundariesFile, ONEMB, ONEMB, fs.getDefaultBlockSize(boundariesFile), fs.getDefaultReplication(boundariesFile), seed);
    FSDataInputStream in = fs.open(boundariesFile);
    // Sanity check: a valid seek updates the position.
    in.seek(65536);
    assertEquals(65536, in.getPos());
    // Three times the file size — well past EOF, must throw.
    in.seek(ONEMB * 3);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.TestSetTimes

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a file's modification time changes when the output stream is
 * closed: records mtime right after creation, closes the stream, and asserts
 * both timestamps are non-zero and differ.  On IOException, a datanode report
 * is printed before rethrowing to aid debugging.
 */
@Test public void testTimesAtClose() throws IOException { Configuration conf=new HdfsConfiguration(); final int MAX_IDLE_TIME=2000; int replicas=1; conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,50); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE); assertEquals("Number of Datanodes ",numDatanodes,info.length); FileSystem fileSys=cluster.getFileSystem(); assertTrue(fileSys instanceof DistributedFileSystem); try { Path file1=new Path("/simple.dat"); FSDataOutputStream stm=writeFile(fileSys,file1,replicas); System.out.println("Created and wrote file simple.dat"); FileStatus statBeforeClose=fileSys.getFileStatus(file1); long mtimeBeforeClose=statBeforeClose.getModificationTime(); String mdateBeforeClose=dateForm.format(new Date(mtimeBeforeClose)); System.out.println("mtime on " + file1 + " before close is "+ mdateBeforeClose+ " ("+ mtimeBeforeClose+ ")"); assertTrue(mtimeBeforeClose != 0); stm.close(); System.out.println("Closed file."); FileStatus statAfterClose=fileSys.getFileStatus(file1); long mtimeAfterClose=statAfterClose.getModificationTime(); String mdateAfterClose=dateForm.format(new Date(mtimeAfterClose)); System.out.println("mtime on " + file1 + " after close is "+ mdateAfterClose+ " ("+ mtimeAfterClose+ ")"); assertTrue(mtimeAfterClose != 0); assertTrue(mtimeBeforeClose != mtimeAfterClose); cleanupFile(fileSys,file1); } catch ( IOException e) { info=client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { 
fileSys.close(); cluster.shutdown(); } }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests modification and access times in DFS: setTimes with -1 leaves the
 * other timestamp untouched; directory atime starts at 0; setting mtime/atime
 * on a directory is honored; setTimes on a non-existent path throws
 * FileNotFoundException; and all explicitly set times survive a cluster
 * restart (same namenode port, format=false).  On IOException, a datanode
 * report is printed before rethrowing to aid debugging.
 */
@Test public void testTimes() throws IOException { Configuration conf=new HdfsConfiguration(); final int MAX_IDLE_TIME=2000; conf.setInt("ipc.client.connection.maxidletime",MAX_IDLE_TIME); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build(); cluster.waitActive(); final int nnport=cluster.getNameNodePort(); InetSocketAddress addr=new InetSocketAddress("localhost",cluster.getNameNodePort()); DFSClient client=new DFSClient(addr,conf); DatanodeInfo[] info=client.datanodeReport(DatanodeReportType.LIVE); assertEquals("Number of Datanodes ",numDatanodes,info.length); FileSystem fileSys=cluster.getFileSystem(); int replicas=1; assertTrue(fileSys instanceof DistributedFileSystem); try { System.out.println("Creating testdir1 and testdir1/test1.dat."); Path dir1=new Path("testdir1"); Path file1=new Path(dir1,"test1.dat"); FSDataOutputStream stm=writeFile(fileSys,file1,replicas); FileStatus stat=fileSys.getFileStatus(file1); long atimeBeforeClose=stat.getAccessTime(); String adate=dateForm.format(new Date(atimeBeforeClose)); System.out.println("atime on " + file1 + " before close is "+ adate+ " ("+ atimeBeforeClose+ ")"); assertTrue(atimeBeforeClose != 0); stm.close(); stat=fileSys.getFileStatus(file1); long atime1=stat.getAccessTime(); long mtime1=stat.getModificationTime(); adate=dateForm.format(new Date(atime1)); String mdate=dateForm.format(new Date(mtime1)); System.out.println("atime on " + file1 + " is "+ adate+ " ("+ atime1+ ")"); System.out.println("mtime on " + file1 + " is "+ mdate+ " ("+ mtime1+ ")"); assertTrue(atime1 != 0); stat=fileSys.getFileStatus(dir1); long mdir1=stat.getAccessTime(); assertTrue(mdir1 == 0); long atime2=atime1 - (24L * 3600L * 1000L); fileSys.setTimes(file1,-1,atime2); stat=fileSys.getFileStatus(file1); long 
atime3=stat.getAccessTime(); String adate3=dateForm.format(new Date(atime3)); System.out.println("new atime on " + file1 + " is "+ adate3+ " ("+ atime3+ ")"); assertTrue(atime2 == atime3); assertTrue(mtime1 == stat.getModificationTime()); long mtime2=mtime1 - (3600L * 1000L); fileSys.setTimes(file1,mtime2,-1); stat=fileSys.getFileStatus(file1); long mtime3=stat.getModificationTime(); String mdate3=dateForm.format(new Date(mtime3)); System.out.println("new mtime on " + file1 + " is "+ mdate3+ " ("+ mtime3+ ")"); assertTrue(atime2 == stat.getAccessTime()); assertTrue(mtime2 == mtime3); long mtime4=Time.now() - (3600L * 1000L); long atime4=Time.now(); fileSys.setTimes(dir1,mtime4,atime4); stat=fileSys.getFileStatus(dir1); assertTrue("Not matching the modification times",mtime4 == stat.getModificationTime()); assertTrue("Not matching the access times",atime4 == stat.getAccessTime()); Path nonExistingDir=new Path(dir1,"/nonExistingDir/"); try { fileSys.setTimes(nonExistingDir,mtime4,atime4); fail("Expecting FileNotFoundException"); } catch ( FileNotFoundException e) { assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist.")); } cluster.shutdown(); try { Thread.sleep(2 * MAX_IDLE_TIME); } catch ( InterruptedException e) { } cluster=new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build(); cluster.waitActive(); fileSys=cluster.getFileSystem(); System.out.println("Verifying times after cluster restart"); stat=fileSys.getFileStatus(file1); assertTrue(atime2 == stat.getAccessTime()); assertTrue(mtime3 == stat.getModificationTime()); cleanupFile(fileSys,file1); cleanupFile(fileSys,dir1); } catch ( IOException e) { info=client.datanodeReport(DatanodeReportType.ALL); printDatanodeReport(info); throw e; } finally { fileSys.close(); cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.TestWriteRead

APIUtilityVerifier EqualityVerifier 
/** Sequential read-while-write round trip; a zero status means success. */
@Test public void testWriteReadSeq() throws IOException {
  useFCOption = false;
  positionReadOption = false;
  final String fileName = filenameOption;
  final long readStart = 0;
  final int status = testWriteAndRead(fileName, WR_NTIMES, WR_CHUNK_SIZE, readStart);
  LOG.info("Summary status from test1: status= " + status);
  Assert.assertEquals(0, status);
}

APIUtilityVerifier EqualityVerifier 
/** Positional read while writing; a zero status means success. */
@Test public void testWriteReadPos() throws IOException {
  final String fileName = filenameOption;
  positionReadOption = true;
  final long readStart = 0;
  final int status = testWriteAndRead(fileName, WR_NTIMES, WR_CHUNK_SIZE, readStart);
  Assert.assertEquals(0, status);
}

APIUtilityVerifier EqualityVerifier 
/** Positional read landing inside the block currently being written. */
@Test public void testReadPosCurrentBlock() throws IOException {
  final String fileName = filenameOption;
  positionReadOption = true;
  // Write 1.5 blocks per round; start reading just past the first block.
  final int chunkSize = (int) (blockSize) + (int) (blockSize / 2);
  final long readStart = blockSize + 1;
  final int rounds = 5;
  final int status = testWriteAndRead(fileName, rounds, chunkSize, readStart);
  Assert.assertEquals(0, status);
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestClientAccessPrivilege

InternalCallVerifier EqualityVerifier 
/**
 * A read-only ("* ro") NFS export must reject a REMOVE request with
 * NFS3ERR_ACCES even from an otherwise privileged security handler.
 */
@Test(timeout=60000) public void testClientAccessPrivilegeForRemove() throws Exception {
  // Export the filesystem read-only to every host.
  config.set("dfs.nfs.exports.allowed.hosts", "* ro");
  Nfs3 nfsServer = new Nfs3(config);
  nfsServer.startServiceInternal(false);
  RpcProgramNfs3 program = (RpcProgramNfs3) nfsServer.getRpcProgram();
  // Build a REMOVE request for "f1" inside the test directory.
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  XDR removeReq = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(removeReq);
  removeReq.writeString("f1");
  REMOVE3Response response = program.remove(removeReq.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response.getStatus());
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestDFSClientCache

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * DFSClientCache eviction: repeated lookups for the same user return the same
 * cached (open) client, and inserting a client for a second user evicts and
 * closes the first one, leaving the cache below its maximum size.
 */
@Test public void testEviction() throws IOException {
  NfsConfiguration conf = new NfsConfiguration();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
  final int MAX_CACHE_SIZE = 2;
  DFSClientCache cache = new DFSClientCache(conf, MAX_CACHE_SIZE);
  DFSClient first = cache.getDfsClient("test1");
  assertTrue(cache.getDfsClient("test1").toString().contains("ugi=test1"));
  // Same user -> same cached client, still open.
  assertEquals(first, cache.getDfsClient("test1"));
  assertFalse(isDfsClientClose(first));
  // A second user pushes the first client out of the cache and closes it.
  cache.getDfsClient("test2");
  assertTrue(isDfsClientClose(first));
  assertEquals(MAX_CACHE_SIZE - 1, cache.clientCache.size());
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestNfs3Utils

InternalCallVerifier EqualityVerifier 
/**
 * Exercises Nfs3Utils.getAccessRightsForUserGroup against mocked file
 * attributes across the owner/group/other permission classes: no access when
 * the UID (mode 700) or GID (mode 070) does not match, "other" bits apply
 * when neither matches (mode 007), auxiliary GIDs are honored (mode 440),
 * and directories get directory-specific access masks (modes 700 and 711).
 * Expected masks (0, 61, 1, 31, 2) are the packed NFS3 ACCESS bit sets.
 */
@Test public void testGetAccessRightsForUserGroup() throws IOException { Nfs3FileAttributes attr=Mockito.mock(Nfs3FileAttributes.class); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(3); Mockito.when(attr.getMode()).thenReturn(448); Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("No access should be allowed as UID does not match attribute over mode 700",0,Nfs3Utils.getAccessRightsForUserGroup(3,3,null,attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(3); Mockito.when(attr.getMode()).thenReturn(56); Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("No access should be allowed as GID does not match attribute over mode 070",0,Nfs3Utils.getAccessRightsForUserGroup(2,4,null,attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(3); Mockito.when(attr.getMode()).thenReturn(7); Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("Access should be allowed as mode is 007 and UID/GID do not match",61,Nfs3Utils.getAccessRightsForUserGroup(1,4,new int[]{5,6},attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(10); Mockito.when(attr.getMode()).thenReturn(288); Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSREG.toValue()); assertEquals("Access should be allowed as mode is 440 and Aux GID does match",1,Nfs3Utils.getAccessRightsForUserGroup(3,4,new int[]{5,16,10},attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(10); Mockito.when(attr.getMode()).thenReturn(448); Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue()); assertEquals("Access should be allowed for dir as mode is 700 and UID does match",31,Nfs3Utils.getAccessRightsForUserGroup(2,4,new int[]{5,16,10},attr)); assertEquals("No access should be allowed for dir as mode is 700 even though GID does 
match",0,Nfs3Utils.getAccessRightsForUserGroup(3,10,new int[]{5,16,4},attr)); assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",0,Nfs3Utils.getAccessRightsForUserGroup(3,20,new int[]{5,10},attr)); Mockito.when(attr.getUid()).thenReturn(2); Mockito.when(attr.getGid()).thenReturn(10); Mockito.when(attr.getMode()).thenReturn(457); Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue()); assertEquals("Access should be allowed for dir as mode is 711 and GID matches",2,Nfs3Utils.getAccessRightsForUserGroup(3,10,new int[]{5,16,11},attr)); }

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestOffsetRange

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * OffsetRange.ReverseComparatorOnMin: ranges with equal minimums compare as
 * equal (and comparison is reflexive), and a larger minimum sorts BEFORE a
 * smaller one — the ordering is reversed.
 */
@Test public void testCompare() throws IOException {
  OffsetRange low = new OffsetRange(0, 1);
  OffsetRange mid = new OffsetRange(1, 3);
  OffsetRange midCopy = new OffsetRange(1, 3);
  OffsetRange high = new OffsetRange(3, 4);
  // Equal minimums -> equal; reflexive.
  assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(mid, midCopy));
  assertEquals(0, OffsetRange.ReverseComparatorOnMin.compare(mid, mid));
  // Reverse order on min: bigger min compares as "less".
  assertTrue(OffsetRange.ReverseComparatorOnMin.compare(mid, low) < 0);
  assertTrue(OffsetRange.ReverseComparatorOnMin.compare(mid, high) > 0);
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestRpcProgramNfs3

InternalCallVerifier EqualityVerifier 
/**
 * RMDIR of "foo" under the test directory: denied (NFS3ERR_ACCES) for the
 * unprivileged security handler, allowed (NFS3_OK) for the privileged one.
 */
@Test(timeout=60000) public void testRmdir() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  XDR request = new XDR();
  new FileHandle(dirStatus.getFileId()).serialize(request);
  request.writeString("foo");
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  RMDIR3Response denied = nfsd.rmdir(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  RMDIR3Response removed = nfsd.rmdir(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, removed.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * ACCESS on /tmp/bar: denied (NFS3ERR_ACCES) for the unprivileged security
 * handler, allowed (NFS3_OK) for the privileged one.
 */
@Test(timeout=60000) public void testAccess() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
  XDR request = new XDR();
  new FileHandle(status.getFileId()).serialize(request);
  InetSocketAddress client = new InetSocketAddress("localhost", 1234);
  ACCESS3Response denied = nfsd.access(request.asReadOnlyWrap(), securityHandlerUnpriviledged, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());
  ACCESS3Response granted = nfsd.access(request.asReadOnlyWrap(), securityHandler, client);
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, granted.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * CREATE of file "fubar" (mode CREATE_UNCHECKED, default attributes) in the
 * test directory: NFS3ERR_ACCES for an unprivileged caller, NFS3_OK for a
 * privileged one.
 */
@Test(timeout=60000) public void testCreate() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("fubar");
  xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED);
  // Empty SetAttr3 == create with default attributes.
  SetAttr3 symAttr=new SetAttr3();
  symAttr.serialize(xdr_req);
  CREATE3Response response1=nfsd.create(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  CREATE3Response response2=nfsd.create(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * LOOKUP of "bar" in the test directory: NFS3ERR_ACCES when unprivileged,
 * NFS3_OK when privileged.
 */
@Test(timeout=60000) public void testLookup() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  FileHandle handle=new FileHandle(dirId);
  LOOKUP3Request lookupReq=new LOOKUP3Request(handle,"bar");
  XDR xdr_req=new XDR();
  lookupReq.serialize(xdr_req);
  LOOKUP3Response response1=nfsd.lookup(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  LOOKUP3Response response2=nfsd.lookup(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * READLINK: first creates symlink "fubar" -> "bar" via a privileged SYMLINK
 * call, then reads the link back — NFS3ERR_ACCES when unprivileged, NFS3_OK
 * when privileged.
 */
@Test(timeout=60000) public void testReadlink() throws Exception {
  // Setup: create the symlink to read.
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("fubar");
  SetAttr3 symAttr=new SetAttr3();
  symAttr.serialize(xdr_req);
  xdr_req.writeString("bar");
  SYMLINK3Response response=nfsd.symlink(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response.getStatus());
  // Exercise READLINK against the freshly created link's handle.
  FileHandle handle2=response.getObjFileHandle();
  XDR xdr_req2=new XDR();
  handle2.serialize(xdr_req2);
  READLINK3Response response1=nfsd.readlink(xdr_req2.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  READLINK3Response response2=nfsd.readlink(xdr_req2.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * PATHCONF on /tmp/bar: NFS3ERR_ACCES for an unprivileged caller, NFS3_OK
 * for a privileged one.
 */
@Test(timeout=60000)
public void testPathconf() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  long fileId = fileStatus.getFileId();
  FileHandle fh = new FileHandle(fileId);
  XDR request = new XDR();
  fh.serialize(request);

  PATHCONF3Response denied = nfsd.pathconf(request.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());

  PATHCONF3Response granted = nfsd.pathconf(request.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, granted.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * MKDIR of directory "fubar" in the test directory: the request carries the
 * parent handle, the new name and its initial attributes. An unprivileged
 * caller gets NFS3ERR_ACCES; a privileged caller creates the directory.
 *
 * NOTE(review): the previous body invoked nfsd.symlink() with a symlink
 * request, duplicating testSymlink() byte-for-byte and leaving the mkdir
 * procedure untested. It now exercises nfsd.mkdir().
 */
@Test(timeout=60000) public void testMkdir() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("fubar");
  // Empty SetAttr3 == create with default attributes.
  SetAttr3 dirAttr=new SetAttr3();
  dirAttr.serialize(xdr_req);
  MKDIR3Response response1=nfsd.mkdir(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  MKDIR3Response response2=nfsd.mkdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * SYMLINK creating "fubar" -> "bar" in the test directory: NFS3ERR_ACCES for
 * an unprivileged caller, NFS3_OK for a privileged one.
 */
@Test(timeout=60000) public void testSymlink() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("fubar");
  // Link attributes, then the link target.
  SetAttr3 symAttr=new SetAttr3();
  symAttr.serialize(xdr_req);
  xdr_req.writeString("bar");
  SYMLINK3Response response1=nfsd.symlink(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  SYMLINK3Response response2=nfsd.symlink(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * GETATTR on /tmp/bar: NFS3ERR_ACCES for an unprivileged caller, NFS3_OK for
 * a privileged one.
 */
@Test(timeout=60000)
public void testGetattr() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  long fileId = fileStatus.getFileId();
  FileHandle fh = new FileHandle(fileId);
  XDR request = new XDR();
  fh.serialize(request);

  GETATTR3Response denied = nfsd.getattr(request.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());

  GETATTR3Response granted = nfsd.getattr(request.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code", Nfs3Status.NFS3_OK, granted.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * READDIR on the test directory: NFS3ERR_ACCES for an unprivileged caller,
 * NFS3_OK for a privileged one.
 */
@Test(timeout=60000) public void testReaddir() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  FileHandle handle=new FileHandle(dirId);
  XDR xdr_req=new XDR();
  handle.serialize(xdr_req);
  // cookie=0, cookieverf=0, count=100 — presumably cookie/verifier/count per
  // the NFSv3 READDIR wire layout; TODO confirm against READDIR3Request.
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(100);
  READDIR3Response response1=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  READDIR3Response response2=nfsd.readdir(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * FSINFO on /tmp/bar: NFS3ERR_ACCES for an unprivileged caller, NFS3_OK for
 * a privileged one.
 */
@Test(timeout=60000)
public void testFsinfo() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  long fileId = fileStatus.getFileId();
  FileHandle fh = new FileHandle(fileId);
  XDR request = new XDR();
  fh.serialize(request);

  FSINFO3Response denied = nfsd.fsinfo(request.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());

  FSINFO3Response granted = nfsd.fsinfo(request.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, granted.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * FSSTAT on /tmp/bar: NFS3ERR_ACCES for an unprivileged caller, NFS3_OK for
 * a privileged one.
 */
@Test(timeout=60000)
public void testFsstat() throws Exception {
  HdfsFileStatus fileStatus = nn.getRpcServer().getFileInfo("/tmp/bar");
  long fileId = fileStatus.getFileId();
  FileHandle fh = new FileHandle(fileId);
  XDR request = new XDR();
  fh.serialize(request);

  FSSTAT3Response denied = nfsd.fsstat(request.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());

  FSSTAT3Response granted = nfsd.fsstat(request.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, granted.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * WRITE of 10 bytes at offset 0 to /tmp/bar with DATA_SYNC stability:
 * NFS3ERR_ACCES for an unprivileged caller. For a privileged caller the
 * immediate return is null — the write is answered later, so no synchronous
 * response object is produced here.
 */
@Test(timeout=60000) public void testWrite() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId=status.getFileId();
  FileHandle handle=new FileHandle(dirId);
  byte[] buffer=new byte[10];
  for (int i=0; i < 10; i++) {
    buffer[i]=(byte)i;
  }
  WRITE3Request writeReq=new WRITE3Request(handle,0,10,WriteStableHow.DATA_SYNC,ByteBuffer.wrap(buffer));
  XDR xdr_req=new XDR();
  writeReq.serialize(xdr_req);
  WRITE3Response response1=nfsd.write(xdr_req.asReadOnlyWrap(),null,1,securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  WRITE3Response response2=nfsd.write(xdr_req.asReadOnlyWrap(),null,1,securityHandler,new InetSocketAddress("localhost",1234));
  // Privileged write is deferred: no immediate response expected.
  assertEquals("Incorrect response:",null,response2);
}

InternalCallVerifier EqualityVerifier 
/**
 * READ of 5 bytes at offset 0 from /tmp/bar: NFS3ERR_ACCES for an
 * unprivileged caller, NFS3_OK for a privileged one.
 */
@Test(timeout=60000) public void testRead() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId=status.getFileId();
  FileHandle handle=new FileHandle(dirId);
  READ3Request readReq=new READ3Request(handle,0,5);
  XDR xdr_req=new XDR();
  readReq.serialize(xdr_req);
  READ3Response response1=nfsd.read(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  READ3Response response2=nfsd.read(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * READDIRPLUS on the test directory: NFS3ERR_ACCES for an unprivileged
 * caller, NFS3_OK for a privileged one.
 */
@Test(timeout=60000) public void testReaddirplus() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  FileHandle handle=new FileHandle(dirId);
  XDR xdr_req=new XDR();
  handle.serialize(xdr_req);
  // cookie=0, cookieverf=0, then 3 and 2 — presumably dircount/maxcount per
  // the NFSv3 READDIRPLUS wire layout; TODO confirm against READDIRPLUS3Request.
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(3);
  xdr_req.writeInt(2);
  READDIRPLUS3Response response1=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  READDIRPLUS3Response response2=nfsd.readdirplus(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * COMMIT on /tmp/bar (offset 0, count 5) over a mocked channel:
 * NFS3ERR_ACCES for an unprivileged caller. For a privileged caller the
 * immediate return is null — the commit response is delivered later via the
 * channel rather than returned synchronously.
 */
@Test(timeout=60000) public void testCommit() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo("/tmp/bar");
  long dirId=status.getFileId();
  FileHandle handle=new FileHandle(dirId);
  XDR xdr_req=new XDR();
  handle.serialize(xdr_req);
  xdr_req.writeLongAsHyper(0);
  xdr_req.writeInt(5);
  Channel ch=Mockito.mock(Channel.class);
  COMMIT3Response response1=nfsd.commit(xdr_req.asReadOnlyWrap(),ch,1,securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  COMMIT3Response response2=nfsd.commit(xdr_req.asReadOnlyWrap(),ch,1,securityHandler,new InetSocketAddress("localhost",1234));
  // Privileged commit is deferred: no immediate response expected.
  assertEquals("Incorrect COMMIT3Response:",null,response2);
}

InternalCallVerifier EqualityVerifier 
/**
 * RENAME of "bar" to "fubar" within the same test directory: NFS3ERR_ACCES
 * for an unprivileged caller, NFS3_OK for a privileged one.
 */
@Test(timeout=60000) public void testRename() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  // Source dir/name, then destination dir/name (same directory handle).
  handle.serialize(xdr_req);
  xdr_req.writeString("bar");
  handle.serialize(xdr_req);
  xdr_req.writeString("fubar");
  RENAME3Response response1=nfsd.rename(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  RENAME3Response response2=nfsd.rename(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code:",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * SETATTR on "bar" in the test directory (empty attribute set, no guard
 * check): NFS3ERR_ACCES for an unprivileged caller, NFS3_OK for a
 * privileged one.
 */
@Test(timeout=60000) public void testSetattr() throws Exception {
  HdfsFileStatus status=nn.getRpcServer().getFileInfo(testdir);
  long dirId=status.getFileId();
  XDR xdr_req=new XDR();
  FileHandle handle=new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("bar");
  SetAttr3 symAttr=new SetAttr3();
  symAttr.serialize(xdr_req);
  // false == no ctime guard on the update.
  xdr_req.writeBoolean(false);
  SETATTR3Response response1=nfsd.setattr(xdr_req.asReadOnlyWrap(),securityHandlerUnpriviledged,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code",Nfs3Status.NFS3ERR_ACCES,response1.getStatus());
  SETATTR3Response response2=nfsd.setattr(xdr_req.asReadOnlyWrap(),securityHandler,new InetSocketAddress("localhost",1234));
  assertEquals("Incorrect return code",Nfs3Status.NFS3_OK,response2.getStatus());
}

InternalCallVerifier EqualityVerifier 
/**
 * REMOVE of "bar" from the test directory: NFS3ERR_ACCES for an unprivileged
 * caller, NFS3_OK for a privileged one.
 */
@Test(timeout=60000)
public void testRemove() throws Exception {
  HdfsFileStatus dirStatus = nn.getRpcServer().getFileInfo(testdir);
  long parentId = dirStatus.getFileId();
  XDR request = new XDR();
  FileHandle parentHandle = new FileHandle(parentId);
  parentHandle.serialize(request);
  request.writeString("bar");

  REMOVE3Response denied = nfsd.remove(request.asReadOnlyWrap(),
      securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, denied.getStatus());

  REMOVE3Response granted = nfsd.remove(request.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK, granted.getStatus());
}

Class: org.apache.hadoop.hdfs.nfs.nfs3.TestWrites

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Walks checkCommit()/commitBeforeRead() through the OpenFileCtx state
 * machine: inactive context (with and without pending writes), data already
 * flushed (COMMIT_DO_SYNC / COMMIT_FINISHED), and data not yet flushed
 * (COMMIT_WAIT -> NFS3ERR_JUKEBOX for reads) — verifying that the read path
 * never enqueues pending commits. Statement order is significant throughout.
 */
@Test public void testCheckCommitFromRead() throws IOException {
  DFSClient dfsClient=Mockito.mock(DFSClient.class);
  Nfs3FileAttributes attr=new Nfs3FileAttributes();
  HdfsDataOutputStream fos=Mockito.mock(HdfsDataOutputStream.class);
  // Nothing flushed yet: stream position 0.
  Mockito.when(fos.getPos()).thenReturn((long)0);
  NfsConfiguration config=new NfsConfiguration();
  OpenFileCtx ctx=new OpenFileCtx(fos,attr,"/dumpFilePath",dfsClient,new IdUserGroup(config));
  FileHandle h=new FileHandle(1);
  COMMIT_STATUS ret;
  WriteManager wm=new WriteManager(new IdUserGroup(config),config,false);
  assertTrue(wm.addOpenFileStream(h,ctx));
  // Inactive context, no pending writes: commit is a no-op, read is OK.
  ctx.setActiveStatusForTest(false);
  Channel ch=Mockito.mock(Channel.class);
  ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_CTX,ret);
  assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0));
  // Inactive context WITH a pending write: read must fail with NFS3ERR_IO.
  ctx.getPendingWritesForTest().put(new OffsetRange(5,10),new WriteCtx(null,0,0,0,null,null,null,0,false,null));
  ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE,ret);
  assertEquals(Nfs3Status.NFS3ERR_IO,wm.commitBeforeRead(dfsClient,h,0));
  // Re-activate and pretend 10 bytes have been flushed.
  ctx.setActiveStatusForTest(true);
  Mockito.when(fos.getPos()).thenReturn((long)10);
  // Offsets at or below the flushed position sync and finish.
  COMMIT_STATUS status=ctx.checkCommitInternal(5,ch,1,attr,false);
  assertEquals(COMMIT_STATUS.COMMIT_DO_SYNC,status);
  ret=ctx.checkCommit(dfsClient,5,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
  assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,5));
  status=ctx.checkCommitInternal(10,ch,1,attr,true);
  assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
  ret=ctx.checkCommit(dfsClient,10,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
  assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,10));
  ConcurrentNavigableMap commits=ctx.getPendingCommitsForTest();
  assertTrue(commits.size() == 0);
  // Offset past the flushed position: wait, but the read path must NOT
  // register a pending commit; reads get NFS3ERR_JUKEBOX (retry later).
  ret=ctx.checkCommit(dfsClient,11,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
  assertEquals(0,commits.size());
  assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,11));
  // Offset 0 means "commit everything"; a pending write still forces a wait.
  ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_WAIT,ret);
  assertEquals(0,commits.size());
  assertEquals(Nfs3Status.NFS3ERR_JUKEBOX,wm.commitBeforeRead(dfsClient,h,0));
  // With the pending write gone, commit-everything finishes cleanly.
  ctx.getPendingWritesForTest().remove(new OffsetRange(5,10));
  ret=ctx.checkCommit(dfsClient,0,ch,1,attr,true);
  assertEquals(COMMIT_STATUS.COMMIT_FINISHED,ret);
  assertEquals(Nfs3Status.NFS3_OK,wm.commitBeforeRead(dfsClient,h,0));
}

Class: org.apache.hadoop.hdfs.protocol.TestExtendedBlock

EqualityVerifier 
/**
 * ExtendedBlock equality: block pool id and block id participate, while the
 * generation stamp deliberately does not — blocks differing only in
 * generation stamp (GS1 vs GS2) still compare equal.
 */
@Test public void testEquals(){
  assertEquals(new ExtendedBlock(POOL_A,BLOCK_1_GS1),new ExtendedBlock(POOL_A,BLOCK_1_GS1));
  assertNotEquals(new ExtendedBlock(POOL_A,BLOCK_1_GS1),new ExtendedBlock(POOL_B,BLOCK_1_GS1));
  assertNotEquals(new ExtendedBlock(POOL_A,BLOCK_1_GS1),new ExtendedBlock(POOL_A,BLOCK_2_GS1));
  // Generation stamp is excluded from equals() on purpose.
  assertEquals(new ExtendedBlock(POOL_A,BLOCK_1_GS1),new ExtendedBlock(POOL_A,BLOCK_1_GS2));
}

EqualityVerifier 
/**
 * ExtendedBlock.hashCode(): equal blocks hash identically; a different pool
 * or block id produces a different hash.
 */
@Test
public void testHashcode() {
  int baseHash = new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode();
  // Different pool -> different hash.
  assertNotEquals(baseHash, new ExtendedBlock(POOL_B, BLOCK_1_GS1).hashCode());
  // Different block -> different hash.
  assertNotEquals(baseHash, new ExtendedBlock(POOL_A, BLOCK_2_GS1).hashCode());
  // Same pool and block -> same hash.
  assertEquals(baseHash, new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode());
}

Class: org.apache.hadoop.hdfs.protocol.TestLayoutVersion

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test to make sure NameNode.Feature supports previous features: the first
 * NameNode-specific feature (ROLLING_UPGRADE) must support the last
 * non-reserved common feature, and its layout version must be exactly one
 * below the last common feature's (layout versions decrease as they grow).
 */
@Test public void testNameNodeFeature(){
  final LayoutFeature first=NameNodeLayoutVersion.Feature.ROLLING_UPGRADE;
  assertTrue(NameNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,first.getInfo().getLayoutVersion()));
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,first.getInfo().getLayoutVersion());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test to make sure DataNode.Feature supports previous features: the first
 * DataNode-specific feature (FIRST_LAYOUT) must support the last
 * non-reserved common feature, and its layout version must be exactly one
 * below the last common feature's (layout versions decrease as they grow).
 */
@Test public void testDataNodeFeature(){
  final LayoutFeature first=DataNodeLayoutVersion.Feature.FIRST_LAYOUT;
  assertTrue(DataNodeLayoutVersion.supports(LAST_NON_RESERVED_COMMON_FEATURE,first.getInfo().getLayoutVersion()));
  assertEquals(LAST_COMMON_FEATURE.getInfo().getLayoutVersion() - 1,first.getInfo().getLayoutVersion());
}

Class: org.apache.hadoop.hdfs.protocolPB.TestPBHelper

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a CheckpointSignature through its protobuf form and verifies
 * every field survives the conversion.
 */
@Test
public void testConvertCheckpointSignature() {
  CheckpointSignature original =
      new CheckpointSignature(getStorageInfo(NodeType.NAME_NODE), "bpid", 100, 1);
  CheckpointSignatureProto proto = PBHelper.convert(original);
  CheckpointSignature restored = PBHelper.convert(proto);

  assertEquals(original.getBlockpoolID(), restored.getBlockpoolID());
  assertEquals(original.getClusterID(), restored.getClusterID());
  assertEquals(original.getCTime(), restored.getCTime());
  assertEquals(original.getCurSegmentTxId(), restored.getCurSegmentTxId());
  assertEquals(original.getLayoutVersion(), restored.getLayoutVersion());
  assertEquals(original.getMostRecentCheckpointTxId(), restored.getMostRecentCheckpointTxId());
  assertEquals(original.getNamespaceID(), restored.getNamespaceID());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a RecoveringBlock through its protobuf form and verifies the
 * block and every replica location survive conversion.
 *
 * Fix: the location loop previously compared dnInfo[0] with dnInfo1[0] on
 * every iteration, so only the first replica was actually checked.
 */
@Test public void testConvertRecoveringBlock(){
  DatanodeInfo di1=DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2=DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo=new DatanodeInfo[]{di1,di2};
  RecoveringBlock b=new RecoveringBlock(getExtendedBlock(),dnInfo,3);
  RecoveringBlockProto bProto=PBHelper.convert(b);
  RecoveringBlock b1=PBHelper.convert(bProto);
  assertEquals(b.getBlock(),b1.getBlock());
  DatanodeInfo[] dnInfo1=b1.getLocations();
  assertEquals(dnInfo.length,dnInfo1.length);
  // Compare each location pairwise, not just element 0.
  for (int i=0; i < dnInfo.length; i++) {
    compare(dnInfo[i],dnInfo1[i]);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a RemoteEditLogManifest of two segments through its protobuf
 * form and verifies each log entry survives conversion.
 */
@Test
public void testConvertRemoteEditLogManifest() {
  List sourceLogs = new ArrayList();
  sourceLogs.add(new RemoteEditLog(1, 10));
  sourceLogs.add(new RemoteEditLog(11, 20));
  RemoteEditLogManifest manifest = new RemoteEditLogManifest(sourceLogs);

  RemoteEditLogManifestProto proto = PBHelper.convert(manifest);
  RemoteEditLogManifest restored = PBHelper.convert(proto);

  List restoredLogs = restored.getLogs();
  assertEquals(sourceLogs.size(), restoredLogs.size());
  for (int idx = 0; idx < sourceLogs.size(); idx++) {
    compare(sourceLogs.get(idx), restoredLogs.get(idx));
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a NamenodeRegistration through its protobuf form and verifies
 * every field survives the conversion.
 */
@Test public void testConvertNamenodeRegistration(){
  StorageInfo info=getStorageInfo(NodeType.NAME_NODE);
  NamenodeRegistration reg=new NamenodeRegistration("address:999","http:1000",info,NamenodeRole.NAMENODE);
  NamenodeRegistrationProto regProto=PBHelper.convert(reg);
  NamenodeRegistration reg2=PBHelper.convert(regProto);
  assertEquals(reg.getAddress(),reg2.getAddress());
  assertEquals(reg.getClusterID(),reg2.getClusterID());
  assertEquals(reg.getCTime(),reg2.getCTime());
  assertEquals(reg.getHttpAddress(),reg2.getHttpAddress());
  assertEquals(reg.getLayoutVersion(),reg2.getLayoutVersion());
  assertEquals(reg.getNamespaceID(),reg2.getNamespaceID());
  assertEquals(reg.getRegistrationID(),reg2.getRegistrationID());
  assertEquals(reg.getRole(),reg2.getRole());
  assertEquals(reg.getVersion(),reg2.getVersion());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips an ExtendedBlock through its protobuf form, including the
 * negative block-id case.
 */
@Test
public void testConvertExtendedBlock() {
  ExtendedBlock original = getExtendedBlock();
  ExtendedBlockProto proto = PBHelper.convert(original);
  ExtendedBlock restored = PBHelper.convert(proto);
  assertEquals(original, restored);

  // Negative block ids must also survive the round trip.
  original.setBlockId(-1);
  proto = PBHelper.convert(original);
  restored = PBHelper.convert(proto);
  assertEquals(original, restored);
}

APIUtilityVerifier EqualityVerifier 
/**
 * Round-trips a list of LocatedBlocks through its protobuf form and verifies
 * each element survives conversion.
 *
 * Fix: the comparison loop previously read lbl2.get(2) on every iteration,
 * so only the third converted element was ever checked (against each source
 * element in turn).
 */
@Test public void testConvertLocatedBlockList(){
  ArrayList lbl=new ArrayList();
  for (int i=0; i < 3; i++) {
    lbl.add(createLocatedBlock());
  }
  List lbpl=PBHelper.convertLocatedBlock2(lbl);
  List lbl2=PBHelper.convertLocatedBlock(lbpl);
  assertEquals(lbl.size(),lbl2.size());
  // Compare element i with element i, not with a fixed index.
  for (int i=0; i < lbl.size(); i++) {
    compare(lbl.get(i),lbl2.get(i));
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a NameNode StorageInfo through its protobuf form and verifies
 * every field survives the conversion.
 */
@Test public void testConvertStoragInfo(){
  StorageInfo info=getStorageInfo(NodeType.NAME_NODE);
  StorageInfoProto infoProto=PBHelper.convert(info);
  StorageInfo info2=PBHelper.convert(infoProto,NodeType.NAME_NODE);
  assertEquals(info.getClusterID(),info2.getClusterID());
  assertEquals(info.getCTime(),info2.getCTime());
  assertEquals(info.getLayoutVersion(),info2.getLayoutVersion());
  assertEquals(info.getNamespaceID(),info2.getNamespaceID());
}

APIUtilityVerifier EqualityVerifier 
/**
 * Round-trips an AclStatus (owner, group, one DEFAULT/OTHER entry) through
 * its protobuf form and relies on AclStatus.equals for the comparison.
 */
@Test
public void testAclStatusProto() {
  AclEntry entry = new AclEntry.Builder()
      .setName("test")
      .setPermission(FsAction.READ_EXECUTE)
      .setScope(AclEntryScope.DEFAULT)
      .setType(AclEntryType.OTHER)
      .build();
  AclStatus status = new AclStatus.Builder()
      .owner("foo")
      .group("bar")
      .addEntry(entry)
      .build();
  Assert.assertEquals(status, PBHelper.convert(PBHelper.convert(status)));
}

APIUtilityVerifier EqualityVerifier 
/**
 * Round-trips an array of LocatedBlocks through its protobuf form and
 * verifies each element survives conversion.
 */
@Test
public void testConvertLocatedBlockArray() {
  LocatedBlock[] source = new LocatedBlock[3];
  for (int idx = 0; idx < 3; idx++) {
    source[idx] = createLocatedBlock();
  }

  LocatedBlockProto[] protos = PBHelper.convertLocatedBlock(source);
  LocatedBlock[] restored = PBHelper.convertLocatedBlock(protos);

  assertEquals(source.length, restored.length);
  for (int idx = 0; idx < source.length; idx++) {
    compare(source[idx], restored[idx]);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * A Text built from bytes, rendered to a String, and rebuilt from that
 * String equals the original.
 */
@Test
public void testConvertText() {
  Text original = new Text("abc".getBytes());
  String rendered = original.toString();
  Text rebuilt = new Text(rendered);
  assertEquals(original, rebuilt);
}

EqualityVerifier 
/**
 * NamenodeRole <-> NamenodeRoleProto conversion is exact in both directions
 * for every role value.
 */
@Test public void testConvertNamenodeRole(){
  assertEquals(NamenodeRoleProto.BACKUP,PBHelper.convert(NamenodeRole.BACKUP));
  assertEquals(NamenodeRoleProto.CHECKPOINT,PBHelper.convert(NamenodeRole.CHECKPOINT));
  assertEquals(NamenodeRoleProto.NAMENODE,PBHelper.convert(NamenodeRole.NAMENODE));
  assertEquals(NamenodeRole.BACKUP,PBHelper.convert(NamenodeRoleProto.BACKUP));
  assertEquals(NamenodeRole.CHECKPOINT,PBHelper.convert(NamenodeRoleProto.CHECKPOINT));
  assertEquals(NamenodeRole.NAMENODE,PBHelper.convert(NamenodeRoleProto.NAMENODE));
}

EqualityVerifier 
/**
 * DataChecksum.Type <-> ChecksumTypeProto conversion is exact in both
 * directions for NULL, CRC32 and CRC32C.
 */
@Test public void testChecksumTypeProto(){
  assertEquals(DataChecksum.Type.NULL,PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL));
  assertEquals(DataChecksum.Type.CRC32,PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32));
  assertEquals(DataChecksum.Type.CRC32C,PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C));
  assertEquals(PBHelper.convert(DataChecksum.Type.NULL),HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL);
  assertEquals(PBHelper.convert(DataChecksum.Type.CRC32),HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32);
  assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C),HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a NamespaceInfo through its protobuf form; the shared
 * compare() helper checks the StorageInfo fields, then block pool id and
 * build version are checked explicitly.
 */
@Test public void testConvertNamespaceInfo(){
  NamespaceInfo info=new NamespaceInfo(37,"clusterID","bpID",2300);
  NamespaceInfoProto proto=PBHelper.convert(info);
  NamespaceInfo info2=PBHelper.convert(proto);
  compare(info,info2);
  assertEquals(info.getBlockPoolID(),info2.getBlockPoolID());
  assertEquals(info.getBuildVersion(),info2.getBuildVersion());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a DatanodeRegistration (with exported block keys) through its
 * protobuf form, comparing storage info, exported keys, the registration
 * itself and the software version.
 */
@Test public void testConvertDatanodeRegistration(){
  DatanodeID dnId=DFSTestUtil.getLocalDatanodeID();
  BlockKey[] keys=new BlockKey[]{getBlockKey(2),getBlockKey(3)};
  ExportedBlockKeys expKeys=new ExportedBlockKeys(true,9,10,getBlockKey(1),keys);
  DatanodeRegistration reg=new DatanodeRegistration(dnId,new StorageInfo(NodeType.DATA_NODE),expKeys,"3.0.0");
  DatanodeRegistrationProto proto=PBHelper.convert(reg);
  DatanodeRegistration reg2=PBHelper.convert(proto);
  compare(reg.getStorageInfo(),reg2.getStorageInfo());
  compare(reg.getExportedKeys(),reg2.getExportedKeys());
  compare(reg,reg2);
  assertEquals(reg.getSoftwareVersion(),reg2.getSoftwareVersion());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a DNA_TRANSFER BlockCommand (two blocks with ragged target
 * arrays, storage ids and storage types) through its protobuf form and
 * verifies action, blocks and every target DatanodeInfo survive conversion.
 */
@Test public void testConvertBlockCommand(){
  Block[] blocks=new Block[]{new Block(21),new Block(22)};
  // Ragged targets: block 0 has one replica target, block 1 has two.
  DatanodeInfo[][] dnInfos=new DatanodeInfo[][]{new DatanodeInfo[1],new DatanodeInfo[2]};
  dnInfos[0][0]=DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0]=DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1]=DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs={{"s00"},{"s10","s11"}};
  StorageType[][] storageTypes={{StorageType.DEFAULT},{StorageType.DEFAULT,StorageType.DEFAULT}};
  BlockCommand bc=new BlockCommand(DatanodeProtocol.DNA_TRANSFER,"bp1",blocks,dnInfos,storageTypes,storageIDs);
  BlockCommandProto bcProto=PBHelper.convert(bc);
  BlockCommand bc2=PBHelper.convert(bcProto);
  assertEquals(bc.getAction(),bc2.getAction());
  assertEquals(bc.getBlocks().length,bc2.getBlocks().length);
  Block[] blocks2=bc2.getBlocks();
  for (int i=0; i < blocks.length; i++) {
    assertEquals(blocks[i],blocks2[i]);
  }
  // Nested comparison of the ragged target arrays.
  DatanodeInfo[][] dnInfos2=bc2.getTargets();
  assertEquals(dnInfos.length,dnInfos2.length);
  for (int i=0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1=dnInfos[i];
    DatanodeInfo[] d2=dnInfos2[i];
    assertEquals(d1.length,d2.length);
    for (int j=0; j < d1.length; j++) {
      compare(d1[j],d2[j]);
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips AclEntries through their protobuf form. An entry built
 * without a permission (e3) comes back with FsAction.NONE, so the expected
 * array substitutes an equivalent entry with NONE set explicitly.
 */
@Test public void testAclEntryProto(){
  AclEntry e1=new AclEntry.Builder().setName("test").setPermission(FsAction.READ_EXECUTE).setScope(AclEntryScope.DEFAULT).setType(AclEntryType.OTHER).build();
  AclEntry e2=new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setPermission(FsAction.ALL).build();
  // e3 has no permission set — conversion is expected to normalize it to NONE.
  AclEntry e3=new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setName("test").build();
  AclEntry[] expected=new AclEntry[]{e1,e2,new AclEntry.Builder().setScope(e3.getScope()).setType(e3.getType()).setName(e3.getName()).setPermission(FsAction.NONE).build()};
  AclEntry[] actual=Lists.newArrayList(PBHelper.convertAclEntry(PBHelper.convertAclEntryProto(Lists.newArrayList(e1,e2,e3)))).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(expected,actual);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a BlocksWithLocations wrapper of two blocks through its
 * protobuf form and verifies each contained block survives conversion.
 */
@Test
public void testConvertBlocksWithLocations() {
  BlockWithLocations[] sourceBlocks =
      new BlockWithLocations[]{getBlockWithLocations(1), getBlockWithLocations(2)};
  BlocksWithLocations wrapper = new BlocksWithLocations(sourceBlocks);

  BlocksWithLocationsProto proto = PBHelper.convert(wrapper);
  BlocksWithLocations restored = PBHelper.convert(proto);

  BlockWithLocations[] before = wrapper.getBlocks();
  BlockWithLocations[] after = restored.getBlocks();
  assertEquals(before.length, after.length);
  for (int idx = 0; idx < before.length; idx++) {
    compare(before[idx], after[idx]);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a BlockRecoveryCommand of two recovering blocks through its
 * protobuf form: checks the raw proto block ids, the restored blocks, and —
 * via joined toString output — the full recovering-block contents.
 */
@Test public void testConvertBlockRecoveryCommand(){
  DatanodeInfo di1=DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo di2=DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] dnInfo=new DatanodeInfo[]{di1,di2};
  List blks=ImmutableList.of(new RecoveringBlock(getExtendedBlock(1),dnInfo,3),new RecoveringBlock(getExtendedBlock(2),dnInfo,3));
  BlockRecoveryCommand cmd=new BlockRecoveryCommand(blks);
  BlockRecoveryCommandProto proto=PBHelper.convert(cmd);
  // Inspect the proto directly before converting back.
  assertEquals(1,proto.getBlocks(0).getBlock().getB().getBlockId());
  assertEquals(2,proto.getBlocks(1).getBlock().getB().getBlockId());
  BlockRecoveryCommand cmd2=PBHelper.convert(proto);
  List cmd2Blks=Lists.newArrayList(cmd2.getRecoveringBlocks());
  assertEquals(blks.get(0).getBlock(),cmd2Blks.get(0).getBlock());
  assertEquals(blks.get(1).getBlock(),cmd2Blks.get(1).getBlock());
  // String renderings compare the remaining per-block state in one shot.
  assertEquals(Joiner.on(",").join(blks),Joiner.on(",").join(cmd2Blks));
  assertEquals(cmd.toString(),cmd2.toString());
}

APIUtilityVerifier EqualityVerifier 
/**
 * Round-trips a Block through its protobuf form and checks equality.
 */
@Test
public void testConvertBlock() {
  Block original = new Block(1, 100, 3);
  BlockProto proto = PBHelper.convert(original);
  Block restored = PBHelper.convert(proto);
  assertEquals(original, restored);
}

Class: org.apache.hadoop.hdfs.qjournal.TestMiniJournalCluster

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Builds a MiniJournalCluster, checks that its quorum URI lists three
 * journal node addresses and that node 0's edits dir is placed under the
 * MiniDFSCluster base directory, then shuts the cluster down.
 */
@Test public void testStartStop() throws IOException {
  Configuration conf=new Configuration();
  MiniJournalCluster c=new MiniJournalCluster.Builder(conf).build();
  try {
    URI uri=c.getQuorumJournalURI("myjournal");
    // Authority is a semicolon-separated list of the three JN addresses.
    String[] addrs=uri.getAuthority().split(";");
    assertEquals(3,addrs.length);
    JournalNode node=c.getJournalNode(0);
    String dir=node.getConf().get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY);
    assertEquals(new File(MiniDFSCluster.getBaseDirectory() + "journalnode-0").getAbsolutePath(),dir);
  }
 finally {
    c.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestEpochsAreUnique

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Epoch numbers must be unique and strictly increasing across QJM
 * instances: five clean QJMs see epochs 1..5, then twenty QJMs created with
 * a fault-injecting logger factory (retrying until createNewUniqueEpoch
 * succeeds) must each obtain an epoch greater than the previous one.
 */
@Test public void testSingleThreaded() throws IOException {
  Configuration conf=new Configuration();
  MiniJournalCluster cluster=new MiniJournalCluster.Builder(conf).build();
  URI uri=cluster.getQuorumJournalURI(JID);
  QuorumJournalManager qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO);
  try {
    qjm.format(FAKE_NSINFO);
  }
 finally {
    qjm.close();
  }
  try {
    // Phase 1: five clean managers get consecutive epochs 1..5.
    for (int i=0; i < 5; i++) {
      qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO);
      try {
        qjm.createNewUniqueEpoch();
        assertEquals(i + 1,qjm.getLoggerSetForTests().getEpoch());
      }
 finally {
        qjm.close();
      }
    }
    // Phase 2: with injected faults, retry until an epoch is obtained;
    // each successful epoch must still exceed the previous one.
    long prevEpoch=5;
    for (int i=0; i < 20; i++) {
      long newEpoch=-1;
      while (true) {
        qjm=new QuorumJournalManager(conf,uri,FAKE_NSINFO,new FaultyLoggerFactory());
        try {
          qjm.createNewUniqueEpoch();
          newEpoch=qjm.getLoggerSetForTests().getEpoch();
          break;
        }
 catch (        IOException ioe) {
        }
 finally {
          qjm.close();
        }
      }
      LOG.info("Created epoch " + newEpoch);
      assertTrue("New epoch " + newEpoch + " should be greater than previous "+ prevEpoch,newEpoch > prevEpoch);
      prevEpoch=newEpoch;
    }
  }
 finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestIPCLoggerChannel

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test that, once the queue eclipses the configured size limit, calls to
 * journal more data are rejected: the RPC is stalled with a DelayAnswer,
 * the queue is filled exactly to LIMIT_QUEUE_SIZE_BYTES, one more send must
 * fail with LoggerTooFarBehindException, and after releasing the stall the
 * queue drains to zero.
 */
@Test public void testQueueLimiting() throws Exception {
  // Stall the mocked journal RPC so edits pile up in the channel's queue.
  DelayAnswer delayer=new DelayAnswer(LOG);
  Mockito.doAnswer(delayer).when(mockProxy).journal(Mockito.any(),Mockito.eq(1L),Mockito.eq(1L),Mockito.eq(1),Mockito.same(FAKE_DATA));
  int numToQueue=LIMIT_QUEUE_SIZE_BYTES / FAKE_DATA.length;
  for (int i=1; i <= numToQueue; i++) {
    ch.sendEdits(1L,(long)i,1,FAKE_DATA);
  }
  assertEquals(LIMIT_QUEUE_SIZE_BYTES,ch.getQueuedEditsSize());
  // One more send must be rejected as the logger is too far behind.
  try {
    ch.sendEdits(1L,numToQueue + 1,1,FAKE_DATA).get(1,TimeUnit.SECONDS);
    fail("Did not fail to queue more calls after queue was full");
  }
 catch (  ExecutionException ee) {
    if (!(ee.getCause() instanceof LoggerTooFarBehindException)) {
      throw ee;
    }
  }
  // Release the stalled RPC and wait for the queue to drain completely.
  delayer.proceed();
  GenericTestUtils.waitFor(new Supplier(){
    @Override public Boolean get(){
      return ch.getQueuedEditsSize() == 0;
    }
  }
,10,1000);
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestQuorumCall

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Exercises QuorumCall counting and waiting semantics as futures complete
// successfully, fail, and time out.
// NOTE(review): generic type parameters appear stripped by extraction in
// this file (e.g. "Map>") -- confirm against upstream source.
@Test(timeout=10000) public void testQuorums() throws Exception {
  Map> futures=ImmutableMap.of("f1",SettableFuture.create(),"f2",SettableFuture.create(),"f3",SettableFuture.create());
  QuorumCall q=QuorumCall.create(futures);
  assertEquals(0,q.countResponses());
  futures.get("f1").set("first future");
  // Waits should return promptly once the requested counts are satisfied.
  q.waitFor(1,0,0,100000,"test");
  q.waitFor(0,1,0,100000,"test");
  assertEquals(1,q.countResponses());
  // A failed future still counts as a response.
  futures.get("f2").setException(new Exception("error"));
  assertEquals(2,q.countResponses());
  futures.get("f3").set("second future");
  q.waitFor(3,0,100,100000,"test");
  q.waitFor(0,2,100,100000,"test");
  assertEquals(3,q.countResponses());
  // Only successful responses appear in getResults().
  assertEquals("f1=first future,f3=second future",Joiner.on(",").withKeyValueSeparator("=").join(new TreeMap(q.getResults())));
  try {
    // Asking for more successes than can ever arrive must time out.
    q.waitFor(0,4,100,10,"test");
    fail("Didn't time out waiting for more responses than came back");
  } catch ( TimeoutException te) {
    // expected
  }
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestQuorumJournalManager

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
// Per-test fixture: builds a fresh MiniJournalCluster and a spying QJM,
// formats and recovers it so each test starts cleanly at epoch 1.
@Before public void setup() throws Exception {
  conf=new Configuration();
  // Fail fast on IPC connect instead of retrying, to keep tests quick.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0);
  cluster=new MiniJournalCluster.Builder(conf).build();
  qjm=createSpyingQJM();
  // Keep handles to the logger spies so tests can inject faults.
  spies=qjm.getLoggerSetForTests().getLoggersForTests();
  qjm.format(QJMTestUtil.FAKE_NSINFO);
  qjm.recoverUnfinalizedSegments();
  assertEquals(1,qjm.getLoggerSetForTests().getEpoch());
}

EqualityVerifier 
/**
 * Test another edge case discovered by randomized testing.
 *
 * Starts with the edge case state set up by
 * {@link #setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery()}
 *
 * Recovery 2:
 * - New NN starts recovery and only talks to JN1 and JN2. JN0 has
 *   crashed. Since they have no logs open, they say they don't need
 *   recovery.
 * - Before writing any transactions, JN0 comes back to life and
 *   JN1 crashes.
 * - Starts writing segment 101, and writes 50 transactions before crashing.
 *
 * Recovery 3:
 * - JN1 has come back to life. JN2 crashes.
 * - New NN starts recovery and talks to all three. All three have
 *   segments open from txid 101, so it calls prepareRecovery(101)
 * - JN0 has an already-accepted value for segment 101, so it replies
 *   "you should recover 101-101"
 * - Former incorrect behavior: NN truncates logs to txid 101 even though
 *   it should have recovered through 150.
 *
 * In this case, even though there is an accepted recovery decision,
 * the newer log segments should take precedence, since they were written
 * in a newer epoch than the recorded decision.
 */
@Test public void testNewerVersionOfSegmentWins2() throws Exception {
  setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();
  // Recovery 2: JN0 down during recovery, then JN0 returns and JN1 dies.
  cluster.getJournalNode(0).stopAndJoin(0);
  qjm=createSpyingQJM();
  try {
    assertEquals(100,QJMTestUtil.recoverAndReturnLastTxn(qjm));
    cluster.restartJournalNode(0);
    cluster.getJournalNode(1).stopAndJoin(0);
    // Write segment 101-150 in the new epoch (left unfinalized).
    writeSegment(cluster,qjm,101,50,false);
  } finally {
    qjm.close();
  }
  // Recovery 3: JN1 back, JN2 down.
  cluster.restartJournalNode(1);
  cluster.getJournalNode(2).stopAndJoin(0);
  qjm=createSpyingQJM();
  try {
    // Must recover through txid 150, not truncate to 101.
    assertEquals(150,QJMTestUtil.recoverAndReturnLastTxn(qjm));
  } finally {
    qjm.close();
  }
}

EqualityVerifier 
/**
 * Test an edge case discovered by randomized testing.
 *
 * Starts with the edge case state set up by
 * {@link #setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery()}
 *
 * Recovery 2:
 * - New NN starts recovery and only talks to JN1 and JN2. JN0 has
 *   crashed. Since they have no logs open, they say they don't need
 *   recovery.
 * - Starts writing segment 101, and writes 50 transactions before crashing.
 *
 * Recovery 3:
 * - JN0 has come back to life.
 * - New NN starts recovery and talks to all three. All three have
 *   segments open from txid 101, so it calls prepareRecovery(101)
 * - JN0 has an already-accepted value for segment 101, so it replies
 *   "you should recover 101-101"
 * - Former incorrect behavior: NN truncates logs to txid 101 even though
 *   it should have recovered through 150.
 *
 * In this case, even though there is an accepted recovery decision,
 * the newer log segments should take precedence, since they were written
 * in a newer epoch than the recorded decision.
 */
@Test public void testNewerVersionOfSegmentWins() throws Exception {
  setupEdgeCaseOneJnHasSegmentWithAcceptedRecovery();
  // Recovery 2 happens without JN0.
  cluster.getJournalNode(0).stopAndJoin(0);
  qjm=createSpyingQJM();
  try {
    assertEquals(100,QJMTestUtil.recoverAndReturnLastTxn(qjm));
    // Write segment 101-150 in the new epoch (left unfinalized).
    writeSegment(cluster,qjm,101,50,false);
  } finally {
    qjm.close();
  }
  // Recovery 3: JN0 is back with its stale accepted-recovery record.
  cluster.restartJournalNode(0);
  qjm=createSpyingQJM();
  try {
    // Must recover through txid 150, not truncate to 101.
    assertEquals(150,QJMTestUtil.recoverAndReturnLastTxn(qjm));
  } finally {
    qjm.close();
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// A reader QJM must only see finalized segments: an in-progress segment
// is invisible until finalizeLogSegment() is called on it.
@Test public void testReaderWhileAnotherWrites() throws Exception {
  QuorumJournalManager readerQjm=closeLater(createSpyingQJM());
  List streams=Lists.newArrayList();
  // Nothing written yet: no streams.
  readerQjm.selectInputStreams(streams,0,false);
  assertEquals(0,streams.size());
  // Write and finalize segment 1-3.
  writeSegment(cluster,qjm,1,3,true);
  readerQjm.selectInputStreams(streams,0,false);
  try {
    assertEquals(1,streams.size());
    EditLogInputStream stream=streams.get(0);
    assertEquals(1,stream.getFirstTxId());
    assertEquals(3,stream.getLastTxId());
    verifyEdits(streams,1,3);
    // Stream must be exhausted after txid 3.
    assertNull(stream.readOp());
  } finally {
    IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
    streams.clear();
  }
  // Segment 4-6 is still in progress: the reader must not see it yet.
  writeSegment(cluster,qjm,4,3,false);
  readerQjm.selectInputStreams(streams,0,false);
  try {
    assertEquals(1,streams.size());
    EditLogInputStream stream=streams.get(0);
    assertEquals(1,stream.getFirstTxId());
    assertEquals(3,stream.getLastTxId());
    verifyEdits(streams,1,3);
  } finally {
    IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
    streams.clear();
  }
  // After finalization the second segment becomes visible.
  qjm.finalizeLogSegment(4,6);
  readerQjm.selectInputStreams(streams,0,false);
  try {
    assertEquals(2,streams.size());
    assertEquals(4,streams.get(1).getFirstTxId());
    assertEquals(6,streams.get(1).getLastTxId());
    verifyEdits(streams,1,6);
  } finally {
    IOUtils.cleanup(LOG,streams.toArray(new Closeable[0]));
    streams.clear();
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test the case where one of the loggers misses a finalizeLogSegment()
 * call, and then misses the next startLogSegment() call before coming
 * back to life.
 *
 * Previously, this caused it to keep on writing to the old log segment,
 * such that one logger had eg edits_1-10 while the others had edits_1-5 and
 * edits_6-10. This caused recovery to fail in certain cases.
 */
@Test public void testMissFinalizeAndNextStart() throws Exception {
  // JN0 misses both the finalize of segment 1-3 and the start of segment 4.
  futureThrows(new IOException("injected")).when(spies.get(0)).finalizeLogSegment(Mockito.eq(1L),Mockito.eq(3L));
  futureThrows(new IOException("injected")).when(spies.get(0)).startLogSegment(Mockito.eq(4L),Mockito.eq(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION));
  // JN1 fails at txn 4, so the writer loses its quorum mid-segment.
  failLoggerAtTxn(spies.get(1),4L);
  writeSegment(cluster,qjm,1,3,true);
  EditLogOutputStream stm=qjm.startLogSegment(4,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  try {
    writeTxns(stm,4,1);
    fail("Did not fail to write");
  } catch ( QuorumException qe) {
    // Expected: quorum lost, writer detects it is out of sync.
    GenericTestUtils.assertExceptionContains("Writer out of sync",qe);
  } finally {
    stm.abort();
    qjm.close();
  }
  // Recover with only JN0/JN1 alive: last durable txn must be 3, proving
  // JN0 did not keep appending to the stale segment.
  cluster.getJournalNode(2).stopAndJoin(0);
  qjm=createSpyingQJM();
  long recovered=QJMTestUtil.recoverAndReturnLastTxn(qjm);
  assertEquals(3L,recovered);
}

Class: org.apache.hadoop.hdfs.qjournal.client.TestSegmentRecoveryComparator

EqualityVerifier 
/**
 * Verifies the SegmentRecoveryComparator ordering: among in-progress
 * segments the one with more transactions wins; a finalized segment beats
 * any in-progress one, even an in-progress segment that was accepted in a
 * recovery round.
 */
@Test public void testComparisons(){
  // In-progress segment [1,3], never accepted in any recovery round.
  Entry inProgress1To3=makeEntry(PrepareRecoveryResponseProto.newBuilder().setSegmentState(SegmentStateProto.newBuilder().setStartTxId(1L).setEndTxId(3L).setIsInProgress(true)).setLastWriterEpoch(0L).build());
  // In-progress segment [1,4].
  Entry inProgress1To4=makeEntry(PrepareRecoveryResponseProto.newBuilder().setSegmentState(SegmentStateProto.newBuilder().setStartTxId(1L).setEndTxId(4L).setIsInProgress(true)).setLastWriterEpoch(0L).build());
  // In-progress segment [1,4] that was accepted during recovery epoch 1.
  Entry inProgress1To4Accepted=makeEntry(PrepareRecoveryResponseProto.newBuilder().setSegmentState(SegmentStateProto.newBuilder().setStartTxId(1L).setEndTxId(4L).setIsInProgress(true)).setLastWriterEpoch(0L).setAcceptedInEpoch(1L).build());
  // Finalized segment [1,3].
  Entry finalized1To3=makeEntry(PrepareRecoveryResponseProto.newBuilder().setSegmentState(SegmentStateProto.newBuilder().setStartTxId(1L).setEndTxId(3L).setIsInProgress(false)).setLastWriterEpoch(0L).build());
  // Reflexive: an entry compares equal to itself.
  assertEquals(0,INSTANCE.compare(inProgress1To3,inProgress1To3));
  // Among in-progress segments, more transactions ranks higher.
  assertEquals(-1,INSTANCE.compare(inProgress1To3,inProgress1To4));
  assertEquals(1,INSTANCE.compare(inProgress1To4,inProgress1To3));
  // Finalized beats in-progress regardless of segment length...
  assertEquals(-1,INSTANCE.compare(inProgress1To4,finalized1To3));
  assertEquals(1,INSTANCE.compare(finalized1To3,inProgress1To4));
  // ...even when the in-progress segment carries an accepted recovery.
  assertEquals(-1,INSTANCE.compare(inProgress1To4Accepted,finalized1To3));
  assertEquals(1,INSTANCE.compare(finalized1To3,inProgress1To4Accepted));
}

Class: org.apache.hadoop.hdfs.qjournal.server.TestJournal

InternalCallVerifier EqualityVerifier 
/**
 * Test that, if the writer crashes at the very beginning of a segment,
 * before any transactions are written, that the next newEpoch() call
 * returns the prior segment txid as its most recent segment.
 */
@Test(timeout=10000) public void testNewEpochAtBeginningOfSegment() throws Exception {
  journal.newEpoch(FAKE_NSINFO,1);
  journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(2),1,1,2,QJMTestUtil.createTxnData(1,2));
  journal.finalizeLogSegment(makeRI(3),1,2);
  // Open a segment at txid 3 but write nothing to it (simulated crash).
  journal.startLogSegment(makeRI(4),3,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  NewEpochResponseProto resp=journal.newEpoch(FAKE_NSINFO,2);
  // The empty segment is skipped; the last real segment started at txid 1.
  assertEquals(1,resp.getLastSegmentTxId());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Epoch promises must be monotonic: newEpoch() bumps the promised epoch,
 * re-proposing an equal or lower epoch fails, and RPCs carrying a stale
 * epoch in their RequestInfo are rejected.
 */
@Test(timeout=10000) public void testEpochHandling() throws Exception {
  assertEquals(0,journal.getLastPromisedEpoch());
  NewEpochResponseProto newEpoch=journal.newEpoch(FAKE_NSINFO,1);
  // No segments exist yet, so no last-segment txid is reported.
  assertFalse(newEpoch.hasLastSegmentTxId());
  assertEquals(1,journal.getLastPromisedEpoch());
  // Fix: capture the response of this call; the original re-asserted on the
  // stale epoch-1 response, leaving the epoch-3 response unchecked.
  newEpoch=journal.newEpoch(FAKE_NSINFO,3);
  assertFalse(newEpoch.hasLastSegmentTxId());
  assertEquals(3,journal.getLastPromisedEpoch());
  // Proposing the same epoch again must be refused.
  try {
    journal.newEpoch(FAKE_NSINFO,3);
    fail("Should have failed to promise same epoch twice");
  } catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Proposed epoch 3 <= last promise 3",ioe);
  }
  // Calls carrying the old epoch (via makeRI(1)) must be rejected.
  try {
    journal.startLogSegment(makeRI(1),12345L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    fail("Should have rejected call from prior epoch");
  } catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
  }
  try {
    journal.journal(makeRI(1),12345L,100L,0,new byte[0]);
    fail("Should have rejected call from prior epoch");
  } catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 3",ioe);
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// format() must wipe cached epoch state: after formatting with a new
// namespace, both the promised and writer epochs reset to 0 while the
// journal still reports itself as formatted.
@Test(timeout=10000) public void testFormatResetsCachedValues() throws Exception {
  journal.newEpoch(FAKE_NSINFO,12345L);
  journal.startLogSegment(new RequestInfo(JID,12345L,1L,0L),1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  assertEquals(12345L,journal.getLastPromisedEpoch());
  assertEquals(12345L,journal.getLastWriterEpoch());
  assertTrue(journal.isFormatted());
  // NOTE(review): closed before format -- presumably needed to release the
  // storage directory lock; confirm against the Journal implementation.
  journal.close();
  journal.format(FAKE_NSINFO_2);
  assertEquals(0,journal.getLastPromisedEpoch());
  assertEquals(0,journal.getLastWriterEpoch());
  assertTrue(journal.isFormatted());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test whether JNs can correctly handle editlog that cannot be decoded.
 * Writes garbage transactions under an older layout version and checks
 * that getSegmentInfo() reports the segment bounds correctly both while
 * the segment is in progress and after it is finalized.
 */
@Test public void testScanEditLog() throws Exception {
  // Use an older layout version so the segment must be scanned, not decoded.
  journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1);
  final int numTxns=5;
  // Fix: derive the txn count from numTxns instead of repeating the
  // literal 5, so the two stay in sync if the count is ever changed.
  byte[] ops=QJMTestUtil.createGabageTxns(1,numTxns);
  journal.journal(makeRI(2),1,1,numTxns,ops);
  // While in progress, the scanned segment still reports correct bounds.
  SegmentStateProto segmentState=journal.getSegmentInfo(1);
  assertTrue(segmentState.getIsInProgress());
  Assert.assertEquals(numTxns,segmentState.getEndTxId());
  Assert.assertEquals(1,segmentState.getStartTxId());
  // After finalization the segment reports as no longer in progress.
  journal.finalizeLogSegment(makeRI(3),1,numTxns);
  segmentState=journal.getSegmentInfo(1);
  assertFalse(segmentState.getIsInProgress());
  Assert.assertEquals(numTxns,segmentState.getEndTxId());
  Assert.assertEquals(1,segmentState.getStartTxId());
}

InternalCallVerifier EqualityVerifier 
// Journal state (storage info, promised epoch, last segment txid) must
// survive closing and reopening the Journal instance.
@Test(timeout=10000) public void testRestartJournal() throws Exception {
  journal.newEpoch(FAKE_NSINFO,1);
  journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  journal.journal(makeRI(2),1,1,2,QJMTestUtil.createTxnData(1,2));
  // Snapshot the storage descriptor before the restart for comparison.
  String storageString=journal.getStorage().toColonSeparatedString();
  System.err.println("storage string: " + storageString);
  journal.close();
  journal=new Journal(conf,TEST_LOG_DIR,JID,StartupOption.REGULAR,mockErrorReporter);
  assertEquals(storageString,journal.getStorage().toColonSeparatedString());
  assertEquals(1,journal.getLastPromisedEpoch());
  // The reopened journal still knows its last segment started at txid 1.
  NewEpochResponseProtoOrBuilder newEpoch=journal.newEpoch(FAKE_NSINFO,2);
  assertEquals(1,newEpoch.getLastSegmentTxId());
}

InternalCallVerifier EqualityVerifier 
// The journal's committed txid advances only when a later RPC carries a
// higher committedTxId in its RequestInfo (the 4th constructor argument).
@Test(timeout=10000) public void testMaintainCommittedTxId() throws Exception {
  journal.newEpoch(FAKE_NSINFO,1);
  journal.startLogSegment(makeRI(1),1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  // Write txns 1-3 with committedTxId=0: nothing is committed yet.
  journal.journal(new RequestInfo(JID,1,2,0),1,1,3,QJMTestUtil.createTxnData(1,3));
  assertEquals(0,journal.getCommittedTxnIdForTests());
  // Write txns 4-6 with committedTxId=3: the commit point moves to 3.
  journal.journal(new RequestInfo(JID,1,3,3),1,4,3,QJMTestUtil.createTxnData(4,6));
  assertEquals(3,journal.getCommittedTxnIdForTests());
}

Class: org.apache.hadoop.hdfs.qjournal.server.TestJournalNode

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that the JournalNode performs correctly as a Paxos Acceptor
 * process: prepareRecovery/acceptRecovery require an established epoch,
 * reflect previously accepted state, and reject stale-epoch callers.
 */
@Test(timeout=100000) public void testAcceptRecoveryBehavior() throws Exception {
  // Paxos calls are illegal before any epoch has been established.
  try {
    ch.prepareRecovery(1L).get();
    fail("Did not throw IllegalState when trying to run paxos without an epoch");
  } catch ( ExecutionException ise) {
    GenericTestUtils.assertExceptionContains("bad epoch",ise);
  }
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  // Nothing written yet: prepare response carries no segment state.
  PrepareRecoveryResponseProto prep=ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertFalse(prep.hasSegmentState());
  // Write one transaction, then prepare again: segment state appears but
  // nothing has been accepted yet.
  ch.startLogSegment(1L,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L,1L,1,QJMTestUtil.createTxnData(1,1)).get();
  prep=ch.prepareRecovery(1L).get();
  System.err.println("Prep: " + prep);
  assertFalse(prep.hasAcceptedInEpoch());
  assertTrue(prep.hasSegmentState());
  ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
  // Fix: wait for newEpoch(2) to complete before proceeding; the original
  // dropped the returned future, racing the epoch bump against the
  // prepareRecovery call below.
  ch.newEpoch(2).get();
  ch.setEpoch(2);
  // The previously accepted recovery must now be reported.
  prep=ch.prepareRecovery(1L).get();
  assertEquals(1L,prep.getAcceptedInEpoch());
  assertEquals(1L,prep.getSegmentState().getEndTxId());
  // Stale-epoch callers must be rejected for both paxos phases.
  ch.setEpoch(1);
  try {
    ch.prepareRecovery(1L).get();
    fail("prepare from earlier epoch not rejected");
  } catch ( ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
  }
  try {
    ch.acceptRecovery(prep.getSegmentState(),new URL("file:///dev/null")).get();
    fail("accept from earlier epoch not rejected");
  } catch ( ExecutionException ioe) {
    GenericTestUtils.assertExceptionContains("epoch 1 is less than the last promised epoch 2",ioe);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// JournalNode HTTP server: exposes JMX metrics, serves a finalized segment
// via /getJournal, and returns 404 for an unknown segment txid.
@Test(timeout=100000) public void testHttpServer() throws Exception {
  String urlRoot=jn.getHttpServerURI();
  String pageContents=DFSTestUtil.urlGet(new URL(urlRoot + "/jmx"));
  assertTrue("Bad contents: " + pageContents,pageContents.contains("Hadoop:service=JournalNode,name=JvmMetrics"));
  // Write and finalize a small segment so it can be fetched over HTTP.
  byte[] EDITS_DATA=QJMTestUtil.createTxnData(1,3);
  IPCLoggerChannel ch=new IPCLoggerChannel(conf,FAKE_NSINFO,journalId,jn.getBoundIpcAddress());
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L,1,3,EDITS_DATA).get();
  ch.finalizeLogSegment(1,3).get();
  byte[] retrievedViaHttp=DFSTestUtil.urlGetBytes(new URL(urlRoot + "/getJournal?segmentTxId=1&jid=" + journalId));
  // Expected stream: layout-version header, four zero bytes, then the edits.
  byte[] expected=Bytes.concat(Ints.toByteArray(HdfsConstants.NAMENODE_LAYOUT_VERSION),(new byte[]{0,0,0,0}),EDITS_DATA);
  assertArrayEquals(expected,retrievedViaHttp);
  // A non-existent segment must yield HTTP 404.
  URL badUrl=new URL(urlRoot + "/getJournal?segmentTxId=12345&jid=" + journalId);
  HttpURLConnection connection=(HttpURLConnection)badUrl.openConnection();
  try {
    assertEquals(404,connection.getResponseCode());
  } finally {
    connection.disconnect();
  }
}

InternalCallVerifier EqualityVerifier 
// Across segment lifecycle transitions (in-progress, finalized, new empty
// segment) every newEpoch() must keep reporting txid 1 as the start of the
// most recent segment containing data.
@Test(timeout=100000) public void testReturnsSegmentInfoAtEpochTransition() throws Exception {
  ch.newEpoch(1).get();
  ch.setEpoch(1);
  ch.startLogSegment(1,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  ch.sendEdits(1L,1,2,QJMTestUtil.createTxnData(1,2)).get();
  // Segment 1-2 still in progress.
  NewEpochResponseProto response=ch.newEpoch(2).get();
  ch.setEpoch(2);
  assertEquals(1,response.getLastSegmentTxId());
  // Segment 1-2 finalized.
  ch.finalizeLogSegment(1,2).get();
  response=ch.newEpoch(3).get();
  ch.setEpoch(3);
  assertEquals(1,response.getLastSegmentTxId());
  // A new, still-empty segment at txid 3 is ignored by newEpoch().
  ch.startLogSegment(3,NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
  response=ch.newEpoch(4).get();
  ch.setEpoch(4);
  assertEquals(1,response.getLastSegmentTxId());
}

Class: org.apache.hadoop.hdfs.qjournal.server.TestJournalNodeMXBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// The JournalNodeInfo MXBean's JournalsStatus attribute must mirror
// jn.getJournalsStatus(): before formatting, after formatting, and for a
// cluster rebuilt against existing storage (format(false)).
@Test public void testJournalNodeMXBean() throws Exception {
  MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
  ObjectName mxbeanName=new ObjectName("Hadoop:service=JournalNode,name=JournalNodeInfo");
  String journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
  assertEquals(jn.getJournalsStatus(),journalStatus);
  // Unformatted: the nameservice should not appear in the status yet.
  assertFalse(journalStatus.contains(NAMESERVICE));
  // Format a journal for NAMESERVICE; it should now report Formatted=true.
  final NamespaceInfo FAKE_NSINFO=new NamespaceInfo(12345,"mycluster","my-bp",0L);
  jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO);
  journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
  assertEquals(jn.getJournalsStatus(),journalStatus);
  // NOTE(review): generic parameters appear stripped by extraction here --
  // presumably Map<String,Map<String,String>>; confirm against upstream.
  Map> jMap=new HashMap>();
  Map infoMap=new HashMap();
  infoMap.put("Formatted","true");
  jMap.put(NAMESERVICE,infoMap);
  assertEquals(JSON.toString(jMap),journalStatus);
  // Rebuild against existing storage (format(false)): still formatted.
  jCluster=new MiniJournalCluster.Builder(new Configuration()).format(false).numJournalNodes(NUM_JN).build();
  jn=jCluster.getJournalNode(0);
  journalStatus=(String)mbs.getAttribute(mxbeanName,"JournalsStatus");
  assertEquals(jn.getJournalsStatus(),journalStatus);
  jMap=new HashMap>();
  infoMap=new HashMap();
  infoMap.put("Formatted","true");
  jMap.put(NAMESERVICE,infoMap);
  assertEquals(JSON.toString(jMap),journalStatus);
}

Class: org.apache.hadoop.hdfs.security.TestDelegationToken

InternalCallVerifier EqualityVerifier 
// addDelegationTokens() must mint exactly one token on the first call and
// reuse the credential on the second call (zero new tokens issued).
@Test public void testAddDelegationTokensDFSApi() throws Exception {
  UserGroupInformation ugi=UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs=cluster.getFileSystem();
  Credentials creds=new Credentials();
  final Token tokens[]=dfs.addDelegationTokens("JobTracker",creds);
  Assert.assertEquals(1,tokens.length);
  Assert.assertEquals(1,creds.numberOfTokens());
  checkTokenIdentifier(ugi,tokens[0]);
  // Second call finds the token already in creds: nothing new is issued.
  final Token tokens2[]=dfs.addDelegationTokens("JobTracker",creds);
  Assert.assertEquals(0,tokens2.length);
  Assert.assertEquals(1,creds.numberOfTokens());
}

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Same contract as the DFS API, but over WebHDFS: one token on the first
// call, stored in creds (same instance), none issued on the second call.
@Test public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri=WebHdfsFileSystem.SCHEME + "://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final UserGroupInformation ugi=UserGroupInformation.createUserForTesting("JobTracker",new String[]{"user"});
  // Obtain the filesystem as the test user.
  final WebHdfsFileSystem webhdfs=ugi.doAs(new PrivilegedExceptionAction(){
    @Override public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri),config);
    }
  } );
  {
    Credentials creds=new Credentials();
    final Token tokens[]=webhdfs.addDelegationTokens("JobTracker",creds);
    Assert.assertEquals(1,tokens.length);
    Assert.assertEquals(1,creds.numberOfTokens());
    // The credential stores the very same token instance that was returned.
    Assert.assertSame(tokens[0],creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi,tokens[0]);
    // Second call reuses the stored token: nothing new is issued.
    final Token tokens2[]=webhdfs.addDelegationTokens("JobTracker",creds);
    Assert.assertEquals(0,tokens2.length);
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Token renewal must work for both the long (principal) and short forms of
// the renewer name; cancellation is exercised under the long form.
@Test public void testDelegationTokenWithDoAs() throws Exception {
  final DistributedFileSystem dfs=cluster.getFileSystem();
  final Credentials creds=new Credentials();
  final Token tokens[]=dfs.addDelegationTokens("JobTracker",creds);
  Assert.assertEquals(1,tokens.length);
  @SuppressWarnings("unchecked") final Token token=(Token)tokens[0];
  final UserGroupInformation longUgi=UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
  final UserGroupInformation shortUgi=UserGroupInformation.createRemoteUser("JobTracker");
  // Renew as the long-form user; any failure is turned into a test failure.
  longUgi.doAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws IOException {
      try {
        token.renew(config);
      } catch ( Exception e) {
        Assert.fail("Could not renew delegation token for user " + longUgi);
      }
      return null;
    }
  } );
  // Renew as the short-form user; exceptions propagate directly.
  shortUgi.doAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws Exception {
      token.renew(config);
      return null;
    }
  } );
  // Cancel as the long-form user.
  longUgi.doAs(new PrivilegedExceptionAction(){
    @Override public Object run() throws IOException {
      try {
        token.cancel(config);
      } catch ( Exception e) {
        Assert.fail("Could not cancel delegation token for user " + longUgi);
      }
      return null;
    }
  } );
}

Class: org.apache.hadoop.hdfs.security.TestDelegationTokenForProxyUser

InternalCallVerifier EqualityVerifier 
@Test(timeout=20000) public void testDelegationTokenWithRealUser() throws IOException { try { Token[] tokens=proxyUgi.doAs(new PrivilegedExceptionAction[]>(){ @Override public Token[] run() throws IOException { return cluster.getFileSystem().addDelegationTokens("RenewerUser",null); } } ); DelegationTokenIdentifier identifier=new DelegationTokenIdentifier(); byte[] tokenId=tokens[0].getIdentifier(); identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId))); Assert.assertEquals(identifier.getUser().getUserName(),PROXY_USER); Assert.assertEquals(identifier.getUser().getRealUser().getUserName(),REAL_USER); } catch ( InterruptedException e) { } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// WebHDFS operations performed after swapping in the proxy UGI must act as
// the proxy user: home directory path, created-file owner, appended-file
// owner.
@Test(timeout=5000) public void testWebHdfsDoAs() throws Exception {
  WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
  WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
  final WebHdfsFileSystem webhdfs=WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi,config,WebHdfsFileSystem.SCHEME);
  final Path root=new Path("/");
  // Open up the root so the proxy user can write anywhere.
  cluster.getFileSystem().setPermission(root,new FsPermission((short)0777));
  // Swap the filesystem's internal UGI to the proxy user.
  Whitebox.setInternalState(webhdfs,"ugi",proxyUgi);
  {
    // Home directory must now resolve under the proxy user.
    Path responsePath=webhdfs.getHomeDirectory();
    WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
    Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER,responsePath.toString());
  }
  final Path f=new Path("/testWebHdfsDoAs/a.txt");
  {
    // A newly created file is owned by the proxy user.
    FSDataOutputStream out=webhdfs.create(f);
    out.write("Hello, webhdfs user!".getBytes());
    out.close();
    final FileStatus status=webhdfs.getFileStatus(f);
    WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
    Assert.assertEquals(PROXY_USER,status.getOwner());
  }
  {
    // Appending keeps the proxy user as owner.
    final FSDataOutputStream out=webhdfs.append(f);
    out.write("\nHello again!".getBytes());
    out.close();
    final FileStatus status=webhdfs.getFileStatus(f);
    WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
    WebHdfsTestUtil.LOG.info("status.getLen() =" + status.getLen());
    Assert.assertEquals(PROXY_USER,status.getOwner());
  }
}

Class: org.apache.hadoop.hdfs.security.token.block.TestBlockToken

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * This test writes a file and gets the block locations without closing the
 * file, and tests the block token in the last block. Block token is verified
 * by ensuring it is of correct kind.
 * @throws IOException
 * @throws InterruptedException
 */
@Test public void testBlockTokenInLastLocatedBlock() throws IOException, InterruptedException {
  Configuration conf=new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,true);
  // Tiny 512-byte blocks so a 1000-byte write spans multiple blocks.
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,512);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem fs=cluster.getFileSystem();
    String fileName="/testBlockTokenInLastLocatedBlock";
    Path filePath=new Path(fileName);
    FSDataOutputStream out=fs.create(filePath,(short)1);
    out.write(new byte[1000]);
    LocatedBlocks locatedBlocks=cluster.getNameNodeRpc().getBlockLocations(fileName,0,1000);
    // Poll until the last block becomes visible (file is still open).
    // NOTE(review): unbounded poll -- relies on the harness for a timeout.
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks=cluster.getNameNodeRpc().getBlockLocations(fileName,0,1000);
    }
    Token token=locatedBlocks.getLastLocatedBlock().getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME,token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that fast repeated invocations of createClientDatanodeProtocolProxy
 * will not end up using up thousands of sockets. This is a regression test
 * for HDFS-1965.
 */
@Test public void testBlockTokenRpcLeak() throws Exception {
  Configuration conf=new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  // Needs a /proc-style fd directory to count open descriptors; skip if absent.
  Assume.assumeTrue(FD_DIR.exists());
  BlockTokenSecretManager sm=new BlockTokenSecretManager(blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
  Token token=sm.generateToken(block3,EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server server=createMockDatanode(sm,token,conf);
  server.start();
  final InetSocketAddress addr=NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId=DFSTestUtil.getLocalDatanodeID(addr.getPort());
  ExtendedBlock b=new ExtendedBlock("fake-pool",new Block(12345L));
  LocatedBlock fakeBlock=new LocatedBlock(b,new DatanodeInfo[0]);
  fakeBlock.setBlockToken(token);
  // A proxy to an unreachable address, kept alive throughout the loop.
  ClientDatanodeProtocol proxyToNoWhere=RPC.getProxy(ClientDatanodeProtocol.class,ClientDatanodeProtocol.versionID,new InetSocketAddress("1.1.1.1",1),UserGroupInformation.createRemoteUser("junk"),conf,NetUtils.getDefaultSocketFactory(conf));
  ClientDatanodeProtocol proxy=null;
  int fdsAtStart=countOpenFileDescriptors();
  try {
    long endTime=Time.now() + 3000;
    // Hammer proxy creation/teardown for ~3s; fd count must stay roughly flat.
    while (Time.now() < endTime) {
      proxy=DFSUtil.createClientDatanodeProtocolProxy(fakeDnId,conf,1000,false,fakeBlock);
      assertEquals(block3.getBlockId(),proxy.getReplicaVisibleLength(block3));
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
      LOG.info("Num open fds:" + countOpenFileDescriptors());
    }
    int fdsAtEnd=countOpenFileDescriptors();
    // Allow slack for unrelated fds, but not a per-iteration leak.
    if (fdsAtEnd - fdsAtStart > 50) {
      fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
    }
  } finally {
    server.stop();
  }
  RPC.stopProxy(proxyToNoWhere);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// A block token in the caller's UGI must authenticate a client RPC to the
// (mock) datanode; createMockDatanode is expected to answer
// getReplicaVisibleLength with the block id (asserted below).
@Test public void testBlockTokenRpc() throws Exception {
  Configuration conf=new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION,"kerberos");
  UserGroupInformation.setConfiguration(conf);
  BlockTokenSecretManager sm=new BlockTokenSecretManager(blockKeyUpdateInterval,blockTokenLifetime,0,"fake-pool",null);
  Token token=sm.generateToken(block3,EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
  final Server server=createMockDatanode(sm,token,conf);
  server.start();
  final InetSocketAddress addr=NetUtils.getConnectAddress(server);
  // The ticket carries the block token used for SASL authentication.
  final UserGroupInformation ticket=UserGroupInformation.createRemoteUser(block3.toString());
  ticket.addToken(token);
  ClientDatanodeProtocol proxy=null;
  try {
    proxy=DFSUtil.createClientDatanodeProtocolProxy(addr,ticket,conf,NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(),proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}

Class: org.apache.hadoop.hdfs.server.balancer.TestBalancer

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test parse method in Balancer#Cli class with threshold value out of
 * boundaries. Thresholds of 0 and 101 must both be rejected with an
 * IllegalArgumentException carrying a "Number out of range" message.
 */
@Test(timeout=100000) public void testBalancerCliParseWithThresholdOutOfBoundaries(){
  // Decomposition: the two cases differed only in threshold and message.
  assertParseRejectsThreshold("0","Number out of range: threshold = 0.0");
  assertParseRejectsThreshold("101","Number out of range: threshold = 101.0");
}

/** Expects Cli.parse to reject the given threshold with the given message. */
private void assertParseRejectsThreshold(String threshold,String expectedMessage){
  String[] parameters=new String[]{"-threshold",threshold};
  try {
    Balancer.Cli.parse(parameters);
    fail("IllegalArgumentException is expected when threshold value" + " is out of boundary.");
  } catch ( IllegalArgumentException e) {
    assertEquals(expectedMessage,e.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Balancer integration test: pre-populate 3 simulated datanodes with an
 * uneven block distribution (50%/70%/0% of capacity), restart the cluster
 * over the existing storage (format=false), inject the blocks, add a fourth
 * empty node on RACK0, then run the balancer with a node set containing the
 * first datanode's hostname and expect ExitStatus.SUCCESS.
 * NOTE(review): `datanodes` is passed as the third Balancer.Parameters
 * argument; presumably the excluded-nodes set (the included set stays
 * DEFAULT) -- confirm the constructor's parameter order.
 */
@Test(timeout=100000) public void testUnknownDatanode() throws Exception { Configuration conf=new HdfsConfiguration(); initConf(conf); long distribution[]=new long[]{50 * CAPACITY / 100,70 * CAPACITY / 100,0 * CAPACITY / 100}; long capacities[]=new long[]{CAPACITY,CAPACITY,CAPACITY}; String racks[]=new String[]{RACK0,RACK1,RACK1}; int numDatanodes=distribution.length; if (capacities.length != numDatanodes || racks.length != numDatanodes) { throw new IllegalArgumentException("Array length is not the same"); } final long totalUsedSpace=sum(distribution); ExtendedBlock[] blocks=generateBlocks(conf,totalUsedSpace,(short)numDatanodes); Block[][] blocksDN=distributeBlocks(blocks,(short)(numDatanodes - 1),distribution); conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,"0.0f"); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).racks(racks).simulatedCapacities(capacities).build(); try { cluster.waitActive(); client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy(); for (int i=0; i < 3; i++) { cluster.injectBlocks(i,Arrays.asList(blocksDN[i]),null); } cluster.startDataNodes(conf,1,true,null,new String[]{RACK0},null,new long[]{CAPACITY}); cluster.triggerHeartbeats(); Collection namenodes=DFSUtil.getNsServiceRpcUris(conf); Set datanodes=new HashSet(); datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName()); Balancer.Parameters p=new Balancer.Parameters(Balancer.Parameters.DEFAULT.policy,Balancer.Parameters.DEFAULT.threshold,datanodes,Balancer.Parameters.DEFAULT.nodesToBeIncluded); final int r=Balancer.run(namenodes,p,conf); assertEquals(ExitStatus.SUCCESS.getExitCode(),r); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.balancer.TestBalancerWithHANameNodes

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// HA setup: one logical nameservice with nn1/nn2. After failover config is
// applied, DFSUtil.getNsServiceRpcUris must resolve to the single logical
// URI (asserted below) and the balancer must run against it to SUCCESS.
// NOTE(review): the fixed 500ms sleep after transitionToActive(1) is a
// potential source of flakiness -- confirm whether an explicit wait for the
// active state is available.
/** * Test a cluster with even distribution, then a new empty node is added to * the cluster. Test start a cluster with specified number of nodes, and fills * it to be 30% full (with a single file replicated identically to all * datanodes); It then adds one new empty node and starts balancing. */ @Test(timeout=60000) public void testBalancerWithHANameNodes() throws Exception { Configuration conf=new HdfsConfiguration(); TestBalancer.initConf(conf); long newNodeCapacity=TestBalancer.CAPACITY; String newNodeRack=TestBalancer.RACK2; String[] racks=new String[]{TestBalancer.RACK0,TestBalancer.RACK1}; long[] capacities=new long[]{TestBalancer.CAPACITY,TestBalancer.CAPACITY}; assertEquals(capacities.length,racks.length); int numOfDatanodes=capacities.length; NNConf nn1Conf=new MiniDFSNNTopology.NNConf("nn1"); nn1Conf.setIpcPort(NameNode.DEFAULT_PORT); Configuration copiedConf=new Configuration(conf); cluster=new MiniDFSCluster.Builder(copiedConf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities).build(); HATestUtil.setFailoverConfigurations(cluster,conf); try { cluster.waitActive(); cluster.transitionToActive(1); Thread.sleep(500); client=NameNodeProxies.createProxy(conf,FileSystem.getDefaultUri(conf),ClientProtocol.class).getProxy(); long totalCapacity=TestBalancer.sum(capacities); long totalUsedSpace=totalCapacity * 3 / 10; TestBalancer.createFile(cluster,TestBalancer.filePath,totalUsedSpace / numOfDatanodes,(short)numOfDatanodes,1); cluster.startDataNodes(conf,1,true,null,new String[]{newNodeRack},new long[]{newNodeCapacity}); totalCapacity+=newNodeCapacity; TestBalancer.waitForHeartBeat(totalUsedSpace,totalCapacity,client,cluster); Collection namenodes=DFSUtil.getNsServiceRpcUris(conf); assertEquals(1,namenodes.size()); assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster))); final int r=Balancer.run(namenodes,Balancer.Parameters.DEFAULT,conf); 
// After a successful run, waitForBalancer verifies the cluster converged.
assertEquals(ExitStatus.SUCCESS.getExitCode(),r); TestBalancer.waitForBalancer(totalUsedSpace,totalCapacity,client,cluster,Balancer.Parameters.DEFAULT); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.balancer.TestBalancerWithNodeGroup

EqualityVerifier PublicFieldVerifier 
// Fills 2 of 4 node-grouped datanodes to 20% of total capacity, adds a fifth
// empty node in NODEGROUP2, then balances.
// NOTE(review): runBalancer is a helper defined elsewhere in this class;
// presumably it asserts both convergence and node-group placement policy --
// confirm in the helper.
/** * Create a cluster with even distribution, and a new empty node is added to * the cluster, then test node-group locality for balancer policy. */ @Test(timeout=60000) public void testBalancerWithNodeGroup() throws Exception { Configuration conf=createConf(); long[] capacities=new long[]{CAPACITY,CAPACITY,CAPACITY,CAPACITY}; String[] racks=new String[]{RACK0,RACK0,RACK1,RACK1}; String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP0,NODEGROUP1,NODEGROUP2}; int numOfDatanodes=capacities.length; assertEquals(numOfDatanodes,racks.length); assertEquals(numOfDatanodes,nodeGroups.length); MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities); MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups); cluster=new MiniDFSClusterWithNodeGroup(builder); try { cluster.waitActive(); client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy(); long totalCapacity=TestBalancer.sum(capacities); long totalUsedSpace=totalCapacity * 2 / 10; TestBalancer.createFile(cluster,filePath,totalUsedSpace / (numOfDatanodes / 2),(short)(numOfDatanodes / 2),0); long newCapacity=CAPACITY; String newRack=RACK1; String newNodeGroup=NODEGROUP2; cluster.startDataNodes(conf,1,true,null,new String[]{newRack},new long[]{newCapacity},new String[]{newNodeGroup}); totalCapacity+=newCapacity; runBalancer(conf,totalUsedSpace,totalCapacity); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
// Records the set of blocks resident on RACK0 before balancing, then asserts
// the identical set afterwards (assertEquals(before, after)): balancing in
// this topology must not move replicas across racks.
/** * Create a cluster with even distribution, and a new empty node is added to * the cluster, then test rack locality for balancer policy. */ @Test(timeout=60000) public void testBalancerWithRackLocality() throws Exception { Configuration conf=createConf(); long[] capacities=new long[]{CAPACITY,CAPACITY}; String[] racks=new String[]{RACK0,RACK1}; String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP1}; int numOfDatanodes=capacities.length; assertEquals(numOfDatanodes,racks.length); MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities); MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups); cluster=new MiniDFSClusterWithNodeGroup(builder); try { cluster.waitActive(); client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy(); long totalCapacity=TestBalancer.sum(capacities); long totalUsedSpace=totalCapacity * 3 / 10; long length=totalUsedSpace / numOfDatanodes; TestBalancer.createFile(cluster,filePath,length,(short)numOfDatanodes,0); LocatedBlocks lbs=client.getBlockLocations(filePath.toUri().getPath(),0,length); Set before=getBlocksOnRack(lbs.getLocatedBlocks(),RACK0); long newCapacity=CAPACITY; String newRack=RACK1; String newNodeGroup=NODEGROUP2; cluster.startDataNodes(conf,1,true,null,new String[]{newRack},new long[]{newCapacity},new String[]{newNodeGroup}); totalCapacity+=newCapacity; runBalancerCanFinish(conf,totalUsedSpace,totalCapacity); lbs=client.getBlockLocations(filePath.toUri().getPath(),0,length); Set after=getBlocksOnRack(lbs.getLocatedBlocks(),RACK0); assertEquals(before,after); } finally { cluster.shutdown(); } }

EqualityVerifier PublicFieldVerifier 
// NOTE(review): runBalancerCanFinish (helper defined elsewhere in this
// class) is expected to assert only that the balancer terminates -- i.e.
// no-move progress ends the run -- not that data was moved; confirm in the
// helper.
/** * Create a 4 nodes cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2) * in RACK1/NODEGROUP1 and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster * to 60% and 3 replicas, so n2 and n3 will have replica for all blocks according * to replica placement policy with NodeGroup. As a result, n2 and n3 will be * filled with 80% (60% x 4 / 3), and no blocks can be migrated from n2 and n3 * to n0 or n1 as balancer policy with node group. Thus, we expect the balancer * to end in 5 iterations without move block process. */ @Test(timeout=60000) public void testBalancerEndInNoMoveProgress() throws Exception { Configuration conf=createConf(); long[] capacities=new long[]{CAPACITY,CAPACITY,CAPACITY,CAPACITY}; String[] racks=new String[]{RACK0,RACK0,RACK1,RACK1}; String[] nodeGroups=new String[]{NODEGROUP0,NODEGROUP0,NODEGROUP1,NODEGROUP2}; int numOfDatanodes=capacities.length; assertEquals(numOfDatanodes,racks.length); assertEquals(numOfDatanodes,nodeGroups.length); MiniDFSCluster.Builder builder=new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length).racks(racks).simulatedCapacities(capacities); MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups); cluster=new MiniDFSClusterWithNodeGroup(builder); try { cluster.waitActive(); client=NameNodeProxies.createProxy(conf,cluster.getFileSystem(0).getUri(),ClientProtocol.class).getProxy(); long totalCapacity=TestBalancer.sum(capacities); long totalUsedSpace=totalCapacity * 6 / 10; TestBalancer.createFile(cluster,filePath,totalUsedSpace / 3,(short)(3),0); runBalancerCanFinish(conf,totalUsedSpace,totalCapacity); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockInfo

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Adding a storage to a fresh BlockInfo must succeed and the storage must
 * then be retrievable at triplet index 0.
 */
@Test
public void testAddStorage() throws Exception {
  BlockInfo blockInfo = new BlockInfo(3);
  final DatanodeStorageInfo storage =
      DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
  boolean wasAdded = blockInfo.addStorage(storage);
  Assert.assertTrue(wasAdded);
  Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise DatanodeStorageInfo's intrusive block list: build a list of
 * MAX_BLOCKS blocks, verify its length, then repeatedly move elements to
 * the head (each element in order, the head itself, then random elements)
 * and check the head pointer and list contents after each move.
 */
@Test
public void testBlockListMoveToHead() throws Exception {
  LOG.info("BlockInfo moveToHead tests...");
  final int MAX_BLOCKS = 10;

  DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
  // Generics added on the locally-owned lists for type safety.
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();
  int headIndex;
  int curIndex;

  LOG.info("Building block list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    blockInfoList.add(new BlockInfo(blockList.get(i), 3));
    dd.addBlock(blockInfoList.get(i));
    // The index of this (sole) storage in each block's triplets must be 0.
    assertEquals("Find datanode should be 0", 0,
        blockInfoList.get(i).findStorageInfo(dd));
  }

  LOG.info("Checking list length...");
  assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks());
  Iterator it = dd.getBlockIterator();
  int len = 0;
  while (it.hasNext()) {
    it.next();
    len++;
  }
  assertEquals("There should be MAX_BLOCK blockInfo's", MAX_BLOCKS, len);

  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);

  LOG.info("Moving each block to the head of the list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    curIndex = blockInfoList.get(i).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(i), dd.getBlockListHeadForTesting());
  }

  LOG.info("Moving head to the head...");
  BlockInfo temp = dd.getBlockListHeadForTesting();
  curIndex = 0;
  headIndex = 0;
  dd.moveBlockToHead(temp, curIndex, headIndex);
  // FIX: corrected typo "shopuld" -> "should" in the assertion message.
  assertEquals("Moving head to the head of the list should not change the list",
      temp, dd.getBlockListHeadForTesting());

  LOG.info("Checking elements of the list...");
  temp = dd.getBlockListHeadForTesting();
  assertNotNull("Head should not be null", temp);
  // After moving blocks 0..MAX_BLOCKS-1 to the head in order, the list is
  // in reverse insertion order.
  int c = MAX_BLOCKS - 1;
  while (temp != null) {
    assertEquals("Expected element is not on the list",
        blockInfoList.get(c--), temp);
    temp = temp.getNext(0);
  }

  LOG.info("Moving random blocks to the head of the list...");
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  Random rand = new Random();
  for (int i = 0; i < MAX_BLOCKS; i++) {
    int j = rand.nextInt(MAX_BLOCKS);
    curIndex = blockInfoList.get(j).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(j), dd.getBlockListHeadForTesting());
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockInfoUnderConstruction

InternalCallVerifier EqualityVerifier 
/**
 * initializeBlockRecovery should hand the lease-recovery command to one of
 * the expected-location replicas based on heartbeat recency. Attempts 2 and
 * 3 pick dd1 then dd3 even though dd2 has the freshest heartbeat, so nodes
 * already chosen as primary in an earlier attempt appear to be skipped
 * (NOTE(review): confirm against BlockInfoUnderConstruction's primary
 * selection logic).
 */
@Test
public void testInitializeBlockRecovery() throws Exception {
  DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
  BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
      new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
      BlockUCState.UNDER_CONSTRUCTION,
      new DatanodeStorageInfo[] {s1, s2, s3});

  // Attempt 1: dd2 has the freshest heartbeat and receives the command.
  long currentTime = System.currentTimeMillis();
  dd1.setLastUpdate(currentTime - 3 * 1000);
  dd2.setLastUpdate(currentTime - 1 * 1000);
  dd3.setLastUpdate(currentTime - 2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Attempt 2: dd1 receives the command.
  currentTime = System.currentTimeMillis();
  dd1.setLastUpdate(currentTime - 2 * 1000);
  dd2.setLastUpdate(currentTime - 1 * 1000);
  dd3.setLastUpdate(currentTime - 3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Attempt 3: dd3 receives the command.
  // FIX: removed a dead "currentTime = System.currentTimeMillis();"
  // re-assignment that sat between the setLastUpdate calls and
  // initializeBlockRecovery; the value was overwritten before being read.
  currentTime = System.currentTimeMillis();
  dd1.setLastUpdate(currentTime - 2 * 1000);
  dd2.setLastUpdate(currentTime - 1 * 1000);
  dd3.setLastUpdate(currentTime - 3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Attempt 4 (recovery id 3 again): dd3 now has the freshest heartbeat
  // and again receives the command. (Same dead re-assignment removed.)
  currentTime = System.currentTimeMillis();
  dd1.setLastUpdate(currentTime - 2 * 1000);
  dd2.setLastUpdate(currentTime - 1 * 1000);
  dd3.setLastUpdate(currentTime);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Block-report accounting while the NN is in startup safe mode (grounded by
 * the assertEquals sequence below): the first full block report for a
 * storage bumps its blockReportCount from 0 to 1; a repeated report does
 * not bump it again; removing and re-registering the datanode resets the
 * count to 0, after which the next report counts as the first once more.
 * The re-registration path must also call updateRegInfo on the node
 * (verified via Mockito).
 */
@Test public void testSafeModeIBR() throws Exception { DatanodeDescriptor node=spy(nodes.get(0)); DatanodeStorageInfo ds=node.getStorageInfos()[0]; node.setDatanodeUuidForTesting(ds.getStorageID()); node.isAlive=true; DatanodeRegistration nodeReg=new DatanodeRegistration(node,null,null,""); doReturn(true).when(fsn).isInStartupSafeMode(); bm.getDatanodeManager().registerDatanode(nodeReg); bm.getDatanodeManager().addDatanode(node); assertEquals(node,bm.getDatanodeManager().getDatanode(node)); assertEquals(0,ds.getBlockReportCount()); reset(node); bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null)); assertEquals(1,ds.getBlockReportCount()); reset(node); bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null)); assertEquals(1,ds.getBlockReportCount()); bm.getDatanodeManager().removeDatanode(node); reset(node); bm.getDatanodeManager().registerDatanode(nodeReg); verify(node).updateRegInfo(nodeReg); assertEquals(0,ds.getBlockReportCount()); reset(node); bm.processReport(node,new DatanodeStorage(ds.getStorageID()),new BlockListAsLongs(null,null)); assertEquals(1,ds.getBlockReportCount()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * In startup safe mode, a datanode that already claims to hold blocks
 * (numBlocks() stubbed to 1) must still have its first full block report
 * counted exactly once.
 */
@Test
public void testSafeModeIBRAfterIncremental() throws Exception {
  DatanodeDescriptor dn = spy(nodes.get(0));
  DatanodeStorageInfo storage = dn.getStorageInfos()[0];
  dn.setDatanodeUuidForTesting(storage.getStorageID());
  dn.isAlive = true;

  DatanodeRegistration reg = new DatanodeRegistration(dn, null, null, "");

  // Pretend the namesystem is in startup safe mode.
  doReturn(true).when(fsn).isInStartupSafeMode();
  bm.getDatanodeManager().registerDatanode(reg);
  bm.getDatanodeManager().addDatanode(dn);
  assertEquals(dn, bm.getDatanodeManager().getDatanode(dn));
  assertEquals(0, storage.getBlockReportCount());

  // The node reports holding one block already; its first full report must
  // still bump the report count to exactly 1.
  reset(dn);
  doReturn(1).when(dn).numBlocks();
  bm.processReport(dn, new DatanodeStorage(storage.getStorageID()),
      new BlockListAsLongs(null, null));
  assertEquals(1, storage.getBlockReportCount());
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockTokenWithDFS

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end coverage of block access tokens on the read path:
 * (1) read succeeds with a valid token; (2) read is rejected once the
 * 1-second token has expired; (3) a freshly generated READ token is
 * accepted again; (4) a token minted for a different block id is rejected;
 * then the token lifetime is raised to 10 minutes and the three open
 * streams (in1/in2/in3) are re-read across datanode restarts and namenode
 * shutdown/restart cycles to exercise client-side token re-fetching.
 */
@Test public void testRead() throws Exception { MiniDFSCluster cluster=null; int numDataNodes=2; Configuration conf=getConf(numDataNodes); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build(); cluster.waitActive(); assertEquals(numDataNodes,cluster.getDataNodes().size()); final NameNode nn=cluster.getNameNode(); final NamenodeProtocols nnProto=nn.getRpcServer(); final BlockManager bm=nn.getNamesystem().getBlockManager(); final BlockTokenSecretManager sm=bm.getBlockTokenSecretManager(); SecurityTestUtil.setBlockTokenLifetime(sm,1000L); Path fileToRead=new Path(FILE_TO_READ); FileSystem fs=cluster.getFileSystem(); createFile(fs,fileToRead); FSDataInputStream in1=fs.open(fileToRead); assertTrue(checkFile1(in1)); FSDataInputStream in2=fs.open(fileToRead); assertTrue(checkFile1(in2)); FSDataInputStream in3=fs.open(fileToRead); assertTrue(checkFile2(in3)); DFSClient client=null; try { client=new DFSClient(new InetSocketAddress("localhost",cluster.getNameNodePort()),conf); } finally { if (client != null) client.close(); } List locatedBlocks=nnProto.getBlockLocations(FILE_TO_READ,0,FILE_SIZE).getLocatedBlocks(); LocatedBlock lblock=locatedBlocks.get(0); Token myToken=lblock.getBlockToken(); assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken)); tryRead(conf,lblock,true); while (!SecurityTestUtil.isBlockTokenExpired(myToken)) { try { Thread.sleep(10); } catch ( InterruptedException ignored) { } } assertTrue(SecurityTestUtil.isBlockTokenExpired(myToken)); tryRead(conf,lblock,false); lblock.setBlockToken(sm.generateToken(lblock.getBlock(),EnumSet.of(BlockTokenSecretManager.AccessMode.READ))); tryRead(conf,lblock,true); ExtendedBlock wrongBlock=new ExtendedBlock(lblock.getBlock().getBlockPoolId(),lblock.getBlock().getBlockId() + 1); lblock.setBlockToken(sm.generateToken(wrongBlock,EnumSet.of(BlockTokenSecretManager.AccessMode.READ))); tryRead(conf,lblock,false); 
// A token carrying only WRITE/COPY/REPLACE modes must not authorize a read.
lblock.setBlockToken(sm.generateToken(lblock.getBlock(),EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE,BlockTokenSecretManager.AccessMode.COPY,BlockTokenSecretManager.AccessMode.REPLACE))); tryRead(conf,lblock,false); SecurityTestUtil.setBlockTokenLifetime(sm,600 * 1000L); List lblocks=DFSTestUtil.getAllBlocks(in1); for ( LocatedBlock blk : lblocks) { assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken())); } in1.seek(0); assertTrue(checkFile1(in1)); List lblocks2=DFSTestUtil.getAllBlocks(in2); for ( LocatedBlock blk : lblocks2) { assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken())); } assertTrue(in2.seekToNewSource(0)); assertTrue(checkFile1(in2)); List lblocks3=DFSTestUtil.getAllBlocks(in3); for ( LocatedBlock blk : lblocks3) { assertTrue(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken())); } assertTrue(checkFile2(in3)); assertTrue(cluster.restartDataNodes(true)); cluster.waitActive(); assertEquals(numDataNodes,cluster.getDataNodes().size()); cluster.shutdownNameNode(0); lblocks=DFSTestUtil.getAllBlocks(in1); for ( LocatedBlock blk : lblocks) { assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken())); } in1.seek(0); assertTrue(checkFile1(in1)); lblocks2=DFSTestUtil.getAllBlocks(in2); for ( LocatedBlock blk : lblocks2) { assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken())); } in2.seekToNewSource(0); assertTrue(checkFile1(in2)); lblocks3=DFSTestUtil.getAllBlocks(in3); for ( LocatedBlock blk : lblocks3) { assertFalse(SecurityTestUtil.isBlockTokenExpired(blk.getBlockToken())); } assertTrue(checkFile2(in3)); cluster.restartNameNode(0); cluster.shutdownNameNode(0); in1.seek(0); assertTrue(checkFile1(in1)); in2.seekToNewSource(0); assertTrue(checkFile1(in2)); assertTrue(checkFile2(in3)); cluster.restartNameNode(0); assertTrue(cluster.restartDataNodes(true)); cluster.waitActive(); assertEquals(numDataNodes,cluster.getDataNodes().size()); cluster.shutdownNameNode(0); in1.seek(0); 
// NOTE(review): reads are expected to FAIL here -- presumably the streams'
// cached block locations/tokens are no longer valid after this particular
// restart sequence while the NN is down; confirm the intended failure mode.
assertFalse(checkFile1(in1)); assertFalse(checkFile2(in3)); cluster.restartNameNode(0); in1.seek(0); assertTrue(checkFile1(in1)); in2.seekToNewSource(0); assertTrue(checkFile1(in2)); assertTrue(checkFile2(in3)); assertTrue(cluster.restartDataNodes(false)); cluster.waitActive(); assertEquals(numDataNodes,cluster.getDataNodes().size()); in1.seek(0); assertTrue(checkFile1(in1)); in2.seekToNewSource(0); assertTrue(checkFile1(in2)); assertTrue(checkFile2(in3)); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * testing that APPEND operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  final int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());

    final NameNode nn = cluster.getNameNode();
    final BlockManager blockManager = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager secretManager =
        blockManager.getBlockTokenSecretManager();
    // Make tokens expire quickly so the append pipeline must cope with it.
    SecurityTestUtil.setBlockTokenLifetime(secretManager, 1000L);

    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();

    // Write one byte and close, then reopen for append and write most of
    // the remaining data without closing.
    FSDataOutputStream out =
        writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
    out.write(rawData, 0, 1);
    out.close();
    out = fs.append(fileToAppend);
    int splitPoint = rawData.length - 1;
    out.write(rawData, 1, splitPoint - 1);
    out.hflush();

    // Busy-wait until the token held by the open stream has expired.
    Token blockToken = DFSTestUtil.getBlockToken(out);
    while (!SecurityTestUtil.isBlockTokenExpired(blockToken)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }

    // Kill a datanode to force pipeline re-establishment with an expired
    // token, then finish the write.
    cluster.stopDataNode(0);
    out.write(rawData, splitPoint, rawData.length - splitPoint);
    out.close();

    // The complete file must still read back correctly.
    FSDataInputStream verifyIn = fs.open(fileToAppend);
    assertTrue(checkFile1(verifyIn));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * testing that WRITE operation can handle token expiration when
 * re-establishing pipeline is needed
 */
@Test
public void testWrite() throws Exception {
  MiniDFSCluster cluster = null;
  final int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());

    final NameNode nn = cluster.getNameNode();
    final BlockManager blockManager = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager secretManager =
        blockManager.getBlockTokenSecretManager();
    // Make tokens expire quickly so the write pipeline must cope with it.
    SecurityTestUtil.setBlockTokenLifetime(secretManager, 1000L);

    Path fileToWrite = new Path(FILE_TO_WRITE);
    FileSystem fs = cluster.getFileSystem();

    // Write most of the data and flush, keeping the stream open.
    FSDataOutputStream ostream =
        writeFile(fs, fileToWrite, (short) numDataNodes, BLOCK_SIZE);
    int splitPoint = rawData.length - 1;
    ostream.write(rawData, 0, splitPoint);
    ostream.hflush();

    // Busy-wait until the token held by the open stream has expired.
    Token blockToken = DFSTestUtil.getBlockToken(ostream);
    while (!SecurityTestUtil.isBlockTokenExpired(blockToken)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }

    // Kill a datanode to force pipeline re-establishment with an expired
    // token, then finish the write.
    cluster.stopDataNode(0);
    ostream.write(rawData, splitPoint, rawData.length - splitPoint);
    ostream.close();

    // The complete file must still read back correctly.
    FSDataInputStream verifyIn = fs.open(fileToWrite);
    assertTrue(checkFile1(verifyIn));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestBlocksWithNotEnoughRacks

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * A corrupted replica must be detected and re-replicated: with replication
 * factor 2 over racks {rack1, rack1, rack2, rack2}, corrupt one replica on
 * disk, restart that datanode so the corruption is noticed, wait for the
 * corrupt-replica count to reach 1 and replication to recover to 2, then
 * verify every readable on-disk replica on the other nodes matches the
 * original file content. (The "Corrupt replica" string is the assertion's
 * failure message, shown only if a mismatching replica is found.)
 */
@Test public void testCorruptBlockRereplicatedAcrossRacks() throws Exception { Configuration conf=getConf(); short REPLICATION_FACTOR=2; int fileLen=512; final Path filePath=new Path("/testFile"); String racks[]={"/rack1","/rack1","/rack2","/rack2"}; MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build(); final FSNamesystem ns=cluster.getNameNode().getNamesystem(); try { final FileSystem fs=cluster.getFileSystem(); DFSTestUtil.createFile(fs,filePath,fileLen,REPLICATION_FACTOR,1L); final String fileContent=DFSTestUtil.readFile(fs,filePath); ExtendedBlock b=DFSTestUtil.getFirstBlock(fs,filePath); DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0); int dnToCorrupt=DFSTestUtil.firstDnWithBlock(cluster,b); assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt,b)); cluster.restartDataNode(dnToCorrupt); DFSTestUtil.waitCorruptReplicas(fs,ns,filePath,b,1); DFSTestUtil.waitForReplication(cluster,b,2,REPLICATION_FACTOR,0); for (int i=0; i < racks.length; i++) { String blockContent=cluster.readBlockOnDataNode(i,b); if (blockContent != null && i != dnToCorrupt) { assertEquals("Corrupt replica",fileContent,blockContent); } } } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * After the third datanode (started on /rack2) is stopped and removed from
 * the DatanodeManager, replication of the test block drops below its target;
 * once a fresh /rack2 node joins, the block must be re-replicated back to
 * its replication factor so the rack policy is satisfiable again.
 */
@Test
public void testReduceReplFactorDueToRejoinRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  final short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  String[] racks = {"/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Stop the third datanode and drop it from the DatanodeManager.
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(3, datanodes.size());
    DataNode rack2Node = datanodes.get(2);
    DatanodeID rack2Id = rack2Node.getDatanodeId();
    cluster.stopDataNode(2);
    dm.removeDatanode(rack2Id);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);

    // A fresh /rack2 node lets replication recover to the target factor.
    String[] rack2 = {"/rack2"};
    cluster.startDataNodes(conf, 1, true, null, rack2);
    cluster.waitActive();
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestCachedBlocksList

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercise a single CachedBlocksList on a DatanodeDescriptor: add, addFirst,
 * remove, iterate, and clear, checking element order after each mutation and
 * that sibling lists (pendingCached / pendingUncached) stay untouched.
 */
@Test(timeout=60000)
public void testSingleList() {
  DatanodeDescriptor dn = new DatanodeDescriptor(
      new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003));
  CachedBlock[] blocks = new CachedBlock[] {
      new CachedBlock(0L, (short)1, true),
      new CachedBlock(1L, (short)1, true),
      new CachedBlock(2L, (short)1, true)
  };
  // All three lists start empty.
  Assert.assertTrue("expected pending cached list to start off empty.",
      !dn.getPendingCached().iterator().hasNext());
  Assert.assertTrue("expected cached list to start off empty.",
      !dn.getCached().iterator().hasNext());
  Assert.assertTrue("expected pending uncached list to start off empty.",
      !dn.getPendingUncached().iterator().hasNext());

  // Adding to the cached list must not affect the other lists.
  Assert.assertTrue(dn.getCached().add(blocks[0]));
  Assert.assertTrue("expected pending cached list to still be empty.",
      !dn.getPendingCached().iterator().hasNext());
  Assert.assertEquals("failed to insert blocks[0]", blocks[0],
      dn.getCached().iterator().next());
  Assert.assertTrue("expected pending uncached list to still be empty.",
      !dn.getPendingUncached().iterator().hasNext());

  // add appends: list is now [0, 1].
  Assert.assertTrue(dn.getCached().add(blocks[1]));
  Iterator iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[0], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertTrue(!iter.hasNext());

  // addFirst prepends: list is now [2, 0, 1].
  Assert.assertTrue(dn.getCached().addFirst(blocks[2]));
  iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[2], iter.next());
  Assert.assertEquals(blocks[0], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertTrue(!iter.hasNext());

  // Remove from the middle: list is now [2, 1].
  Assert.assertTrue(dn.getCached().remove(blocks[0]));
  iter = dn.getCached().iterator();
  Assert.assertEquals(blocks[2], iter.next());
  Assert.assertEquals(blocks[1], iter.next());
  Assert.assertTrue(!iter.hasNext());

  dn.getCached().clear();
  // FIX: the original asserted on getPendingCached() here, which was empty
  // for the entire test, making the check vacuous; the assertion message
  // clearly intends the cached list that was just cleared.
  Assert.assertTrue("expected cached list to be empty after clear.",
      !dn.getCached().iterator().hasNext());
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestComputeInvalidateWork

BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * computeInvalidateWork schedules at most blockInvalidateLimit deletions per
 * datanode and processes at most nodesToProcess datanodes per call. Each of
 * the 3 nodes is loaded with 3*limit+1 pending invalidations, so the first
 * two calls (>= all nodes) each schedule limit*NUM_OF_DATANODES blocks and
 * the third (NUM-1 nodes) schedules limit*(NUM-1). After those sweeps two
 * nodes hold 1 leftover block and one holds limit+1; since the node picked
 * by computeInvalidateWork(1) is nondeterministic, the final branch accepts
 * either outcome: picking a leftover node returns 1 (and the following
 * 2-node call yields limit+1), picking the big node returns limit (and the
 * following call yields 2).
 */
/** * Test if {@link FSNamesystem#computeInvalidateWork(int)}can schedule invalidate work correctly */ @Test public void testCompInvalidate() throws Exception { final Configuration conf=new HdfsConfiguration(); final int NUM_OF_DATANODES=3; final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build(); try { cluster.waitActive(); final FSNamesystem namesystem=cluster.getNamesystem(); final BlockManager bm=namesystem.getBlockManager(); final int blockInvalidateLimit=bm.getDatanodeManager().blockInvalidateLimit; final DatanodeDescriptor[] nodes=bm.getDatanodeManager().getHeartbeatManager().getDatanodes(); assertEquals(nodes.length,NUM_OF_DATANODES); namesystem.writeLock(); try { for (int i=0; i < nodes.length; i++) { for (int j=0; j < 3 * blockInvalidateLimit + 1; j++) { Block block=new Block(i * (blockInvalidateLimit + 1) + j,0,GenerationStamp.LAST_RESERVED_STAMP); bm.addToInvalidates(block,nodes[i]); } } assertEquals(blockInvalidateLimit * NUM_OF_DATANODES,bm.computeInvalidateWork(NUM_OF_DATANODES + 1)); assertEquals(blockInvalidateLimit * NUM_OF_DATANODES,bm.computeInvalidateWork(NUM_OF_DATANODES)); assertEquals(blockInvalidateLimit * (NUM_OF_DATANODES - 1),bm.computeInvalidateWork(NUM_OF_DATANODES - 1)); int workCount=bm.computeInvalidateWork(1); if (workCount == 1) { assertEquals(blockInvalidateLimit + 1,bm.computeInvalidateWork(2)); } else { assertEquals(workCount,blockInvalidateLimit); assertEquals(2,bm.computeInvalidateWork(2)); } } finally { namesystem.writeUnlock(); } } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestCorruptReplicaInfo

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Sanity checks on CorruptReplicasMap: parameter validation of
 * getCorruptReplicaBlockIds (n must be in [0, 100]), size tracking across
 * add/remove (distinct blocks, not block/datanode pairs), and paging
 * through corrupt block ids.
 */
@Test
public void testCorruptReplicaInfo() throws IOException, InterruptedException {
  CorruptReplicasMap crm = new CorruptReplicasMap();
  assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
  // n outside [0, 100] is rejected with a null return.
  assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIds(-1, null));
  assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIds(101, null));
  long[] l = crm.getCorruptReplicaBlockIds(0, null);
  assertNotNull("n = 0 must return non-null", l);
  assertEquals("n = 0 must return an empty list", 0, l.length);
  int NUM_BLOCK_IDS = 140;
  // Typed collection instead of the former raw LinkedList.
  List<Long> block_ids = new LinkedList<>();
  for (int i = 0; i < NUM_BLOCK_IDS; i++) {
    block_ids.add((long) i);
  }
  DatanodeDescriptor dn1 = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeDescriptor dn2 = DFSTestUtil.getLocalDatanodeDescriptor();
  // size() counts distinct corrupt blocks; a second datanode reporting the
  // same block does not grow the map.
  addToCorruptReplicasMap(crm, getBlock(0), dn1);
  assertEquals("Number of corrupt blocks not returning correctly", 1, crm.size());
  addToCorruptReplicasMap(crm, getBlock(1), dn1);
  assertEquals("Number of corrupt blocks not returning correctly", 2, crm.size());
  addToCorruptReplicasMap(crm, getBlock(1), dn2);
  assertEquals("Number of corrupt blocks not returning correctly", 2, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(1));
  assertEquals("Number of corrupt blocks not returning correctly", 1, crm.size());
  crm.removeFromCorruptReplicasMap(getBlock(0));
  assertEquals("Number of corrupt blocks not returning correctly", 0, crm.size());
  for (Long block_id : block_ids) {
    addToCorruptReplicasMap(crm, getBlock(block_id), dn1);
  }
  assertEquals("Number of corrupt blocks not returning correctly",
      NUM_BLOCK_IDS, crm.size());
  // Paging: first 5 ids, then 10 ids starting after id 7.
  assertTrue("First five block ids not returned correctly ",
      Arrays.equals(new long[]{0, 1, 2, 3, 4},
          crm.getCorruptReplicaBlockIds(5, null)));
  LOG.info(crm.getCorruptReplicaBlockIds(10, 7L));
  LOG.info(block_ids.subList(7, 18));
  assertTrue("10 blocks after 7 not returned correctly ",
      Arrays.equals(new long[]{8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
          crm.getCorruptReplicaBlockIds(10, 7L)));
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestDatanodeDescriptor

InternalCallVerifier EqualityVerifier 
/**
 * Test that getInvalidateBlocks observes the max limit: the first call
 * drains at most MAX_LIMIT blocks and a second call returns the remainder.
 */
@Test
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
  // Typed list instead of the former raw ArrayList.
  ArrayList<Block> blockList = new ArrayList<>(MAX_BLOCKS);
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
  // Fixed argument order: JUnit expects (expected, actual).
  assertEquals(MAX_LIMIT, bc.length);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(REMAINING_BLOCKS, bc.length);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies DatanodeDescriptor#numBlocks stays in sync as blocks are added to
 * and removed from the node's first storage, including duplicate adds and
 * removals of absent blocks.
 */
@Test
public void testBlocksCounter() throws Exception {
  DatanodeDescriptor dd = BlockManagerTestUtil.getLocalDatanodeDescriptor(true);
  assertEquals(0, dd.numBlocks());
  BlockInfo blk = new BlockInfo(new Block(1L), 1);
  BlockInfo blk1 = new BlockInfo(new Block(2L), 2);
  DatanodeStorageInfo[] storages = dd.getStorageInfos();
  assertTrue(storages.length > 0);
  // (Removed an unused local that captured storages[0].getStorageID().)
  // First add is counted.
  assertTrue(storages[0].addBlock(blk));
  assertEquals(1, dd.numBlocks());
  // Removing a block that was never added is a no-op.
  assertFalse(dd.removeBlock(blk1));
  assertEquals(1, dd.numBlocks());
  // Re-adding an existing block is rejected and not double-counted.
  assertFalse(storages[0].addBlock(blk));
  assertEquals(1, dd.numBlocks());
  // Second distinct block raises the count.
  assertTrue(storages[0].addBlock(blk1));
  assertEquals(2, dd.numBlocks());
  // Removals drain the counter back to zero.
  assertTrue(dd.removeBlock(blk));
  assertEquals(1, dd.numBlocks());
  assertTrue(dd.removeBlock(blk1));
  assertEquals(0, dd.numBlocks());
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestDatanodeManager

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * This test sends a random sequence of node registrations and node removals
 * to the DatanodeManager (of nodes with different IDs and versions), and
 * checks that the DatanodeManager keeps a correct count of different software
 * versions at all times.
 */
@Test
public void testNumVersionsReportedCorrect() throws IOException {
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());
  // Log the seed so a failing random sequence can be replayed.
  Random rng = new Random();
  int seed = rng.nextInt();
  rng = new Random(seed);
  LOG.info("Using seed " + seed + " for testing");
  // Restored the generic types that were missing from the declarations:
  // storage ID -> registration.
  HashMap<String, DatanodeRegistration> sIdToDnReg = new HashMap<>();
  for (int i = 0; i < NUM_ITERATIONS; ++i) {
    if (rng.nextBoolean() && i % 3 == 0 && sIdToDnReg.size() != 0) {
      // Remove a pseudo-randomly chosen registered node.
      // NOTE(review): rng.nextInt() % size can be negative; randomIndex is
      // only used as the bound j < randomIndex - 1, so iteration still lands
      // on a valid entry — confirm this selection bias is intended.
      int randomIndex = rng.nextInt() % sIdToDnReg.size();
      Iterator<Entry<String, DatanodeRegistration>> it =
          sIdToDnReg.entrySet().iterator();
      for (int j = 0; j < randomIndex - 1; ++j) {
        it.next();
      }
      DatanodeRegistration toRemove = it.next().getValue();
      LOG.info("Removing node " + toRemove.getDatanodeUuid() + " ip "
          + toRemove.getXferAddr() + " version : "
          + toRemove.getSoftwareVersion());
      dm.removeDatanode(toRemove);
      it.remove();
    } else {
      // Register a node — possibly one already seen, sometimes with a
      // changed IP, and always with a (re-)randomized software version.
      String storageID = "someStorageID" + rng.nextInt(5000);
      DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
      Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
      if (sIdToDnReg.containsKey(storageID)) {
        dr = sIdToDnReg.get(storageID);
        if (rng.nextBoolean()) {
          dr.setIpAddr(dr.getIpAddr() + "newIP");
        }
      } else {
        String ip = "someIP" + storageID;
        Mockito.when(dr.getIpAddr()).thenReturn(ip);
        Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
        Mockito.when(dr.getXferPort()).thenReturn(9000);
      }
      Mockito.when(dr.getSoftwareVersion()).thenReturn("version" + rng.nextInt(5));
      LOG.info("Registering node storageID: " + dr.getDatanodeUuid()
          + ", version: " + dr.getSoftwareVersion() + ", IP address: "
          + dr.getXferAddr());
      dm.registerDatanode(dr);
      sIdToDnReg.put(storageID, dr);
    }
    // Cross-check the manager's per-version counts against our shadow map:
    // decrementing each version once per tracked node must empty the map.
    Map<String, Integer> mapToCheck = dm.getDatanodesSoftwareVersions();
    for (Entry<String, DatanodeRegistration> it : sIdToDnReg.entrySet()) {
      String ver = it.getValue().getSoftwareVersion();
      if (!mapToCheck.containsKey(ver)) {
        throw new AssertionError("The correct number of datanodes of a "
            + "version was not found on iteration " + i);
      }
      mapToCheck.put(ver, mapToCheck.get(ver) - 1);
      if (mapToCheck.get(ver) == 0) {
        mapToCheck.remove(ver);
      }
    }
    for (Entry<String, Integer> entry : mapToCheck.entrySet()) {
      LOG.info("Still in map: " + entry.getKey() + " has " + entry.getValue());
    }
    assertEquals("The map of version counts returned by DatanodeManager was"
        + " not what it was expected to be on iteration " + i, 0,
        mapToCheck.size());
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestHeartbeatHandling

EqualityVerifier 
/**
 * Test if {@link FSNamesystem#handleHeartbeat} can pick up replication
 * and/or invalidate requests and observes the max limit: each heartbeat
 * returns at most MAX_REPLICATE_LIMIT transfer blocks and
 * MAX_INVALIDATE_LIMIT invalidation blocks, draining the queues across
 * successive heartbeats.
 */
@Test
public void testHeartbeat() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm =
        namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg = DataNodeTestUtils.getDNRegistrationForBP(
        cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
    final String storageID = DatanodeStorage.generateUuid();
    dd.updateStorage(new DatanodeStorage(storageID));
    // Queue sizes chosen so each limit is hit exactly twice with
    // REMAINING_BLOCKS left over.
    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT =
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
    final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
    final int MAX_INVALIDATE_BLOCKS = 2 * MAX_INVALIDATE_LIMIT + REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2 * MAX_REPLICATE_LIMIT + REMAINING_BLOCKS;
    final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
    try {
      namesystem.writeLock();
      synchronized (hm) {
        for (int i = 0; i < MAX_REPLICATE_BLOCKS; i++) {
          dd.addBlockToBeReplicated(
              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP), ONE_TARGET);
        }
        // Heartbeat 1: only replication work queued -> a single TRANSFER
        // command capped at the replicate limit.
        DatanodeCommand[] cmds =
            NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
        // Typed list instead of the former raw ArrayList.
        ArrayList<Block> blockList = new ArrayList<>(MAX_INVALIDATE_BLOCKS);
        for (int i = 0; i < MAX_INVALIDATE_BLOCKS; i++) {
          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
        }
        dd.addBlocksToBeInvalidated(blockList);
        // Heartbeat 2: both queues populated -> TRANSFER + INVALIDATE, each
        // at its respective limit.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand) cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
        // Heartbeat 3: the replication queue is down to its remainder.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand) cmds[1]).getBlocks().length);
        // Heartbeat 4: only the invalidate remainder is left.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand) cmds[0]).getBlocks().length);
        // Heartbeat 5: everything drained -> no commands.
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem).getCommands();
        assertEquals(0, cmds.length);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}

EqualityVerifier 
/**
 * Test if {@link FSNamesystem#handleHeartbeat} correctly selects data node
 * targets for block recovery. Three scenarios are driven by manipulating the
 * datanodes' lastUpdate times; the expected recovery locations are asserted
 * after each heartbeat.
 */
@Test
public void testHeartbeatBlockRecovery() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager().getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    // Look up the three datanodes' descriptors and give each one storage.
    final DatanodeRegistration nodeReg1 = DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd1 = NameNodeAdapter.getDatanode(namesystem, nodeReg1);
    dd1.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    final DatanodeRegistration nodeReg2 = DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(1), poolId);
    final DatanodeDescriptor dd2 = NameNodeAdapter.getDatanode(namesystem, nodeReg2);
    dd2.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    final DatanodeRegistration nodeReg3 = DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(2), poolId);
    final DatanodeDescriptor dd3 = NameNodeAdapter.getDatanode(namesystem, nodeReg3);
    dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
    try {
      namesystem.writeLock();
      synchronized (hm) {
        NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
        NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
        NameNodeAdapter.sendHeartBeat(nodeReg3, dd3, namesystem);
        // Case 1: all three nodes heartbeated just now -> the recovery
        // command lists all three locations, in order.
        dd1.setLastUpdate(System.currentTimeMillis());
        dd2.setLastUpdate(System.currentTimeMillis());
        dd3.setLastUpdate(System.currentTimeMillis());
        final DatanodeStorageInfo[] storages = {
            dd1.getStorageInfos()[0],
            dd2.getStorageInfos()[0],
            dd3.getStorageInfos()[0]};
        BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
            new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
            BlockUCState.UNDER_RECOVERY, storages);
        dd1.addBlockToBeRecovered(blockInfo);
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
        BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand) cmds[0];
        assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
        DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks()
            .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
        assertEquals(3, recoveringNodes.length);
        assertEquals(recoveringNodes[0], dd1);
        assertEquals(recoveringNodes[1], dd2);
        assertEquals(recoveringNodes[2], dd3);
        // Case 2: dd2's heartbeat is 40s old -> only dd1 and dd3 are chosen
        // as recovery locations.
        dd1.setLastUpdate(System.currentTimeMillis());
        dd2.setLastUpdate(System.currentTimeMillis() - 40 * 1000);
        dd3.setLastUpdate(System.currentTimeMillis());
        blockInfo = new BlockInfoUnderConstruction(
            new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
            BlockUCState.UNDER_RECOVERY, storages);
        dd1.addBlockToBeRecovered(blockInfo);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
        recoveryCommand = (BlockRecoveryCommand) cmds[0];
        assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
        recoveringNodes = recoveryCommand.getRecoveringBlocks()
            .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
        assertEquals(2, recoveringNodes.length);
        assertEquals(recoveringNodes[0], dd1);
        assertEquals(recoveringNodes[1], dd3);
        // Case 3: every node's heartbeat is stale (40-80s old) -> all three
        // are listed again, per the assertions below.
        dd1.setLastUpdate(System.currentTimeMillis() - 60 * 1000);
        dd2.setLastUpdate(System.currentTimeMillis() - 40 * 1000);
        dd3.setLastUpdate(System.currentTimeMillis() - 80 * 1000);
        blockInfo = new BlockInfoUnderConstruction(
            new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
            BlockUCState.UNDER_RECOVERY, storages);
        dd1.addBlockToBeRecovered(blockInfo);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
        recoveryCommand = (BlockRecoveryCommand) cmds[0];
        assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
        recoveringNodes = recoveryCommand.getRecoveringBlocks()
            .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
        assertEquals(3, recoveringNodes.length);
        assertEquals(recoveringNodes[0], dd1);
        assertEquals(recoveringNodes[1], dd2);
        assertEquals(recoveringNodes[2], dd3);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestHost2NodesMap

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Removing datanodes from the Host2NodesMap: removing an unregistered node
 * or null is a no-op returning false; after each successful removal the host
 * lookup no longer resolves to the removed node, while other hosts
 * (including the second node registered under "3.3.3.3") stay resolvable.
 */
@Test
public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap = DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));
  assertTrue(map.remove(dataNodes[0]));
  // Fixed: the lookup previously used "1.1.1.1." (trailing dot), a host
  // that was never in the map, so the assertion passed regardless of the
  // removal above.
  assertTrue(map.getDatanodeByHost("1.1.1.1") == null);
  assertTrue(map.getDatanodeByHost("2.2.2.2") == dataNodes[1]);
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
  // Removing one of the two "3.3.3.3" nodes leaves the other resolvable.
  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  // Fixed argument order: JUnit expects (expected, actual).
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  assertEquals(dataNodes[3], map.getDatanodeByHost("3.3.3.3"));
  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  assertNull(map.getDatanodeByHost("3.3.3.3"));
  // null and double-removal are both rejected.
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Host lookups resolve registered hosts to their descriptors; a host with
 * two registered nodes may return either; unknown hosts return null.
 */
@Test
public void testGetDatanodeByHost() throws Exception {
  // Fixed argument order: JUnit expects (expected, actual).
  assertEquals(dataNodes[0], map.getDatanodeByHost("1.1.1.1"));
  assertEquals(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestHostFileManager

InternalCallVerifier EqualityVerifier 
/**
 * Exercises DatanodeManager's include/exclude handling via a mocked
 * HostFileManager: HostSet de-duplicates entries resolving to the same
 * address, included-but-unregistered nodes show up as DEAD placeholders in
 * reports, and excluding a removed node hides it from the DEAD report.
 */
@Test
@SuppressWarnings("unchecked")
public void testIncludeExcludeLists() throws IOException {
  BlockManager bm = mock(BlockManager.class);
  FSNamesystem fsn = mock(FSNamesystem.class);
  Configuration conf = new Configuration();
  HostFileManager hm = mock(HostFileManager.class);
  HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
  HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
  // "127.0.0.1:12345" and "localhost:12345" resolve identically and the
  // literal duplicate is ignored, so only two distinct includes remain.
  includedNodes.add(entry("127.0.0.1:12345"));
  includedNodes.add(entry("localhost:12345"));
  includedNodes.add(entry("127.0.0.1:12345"));
  includedNodes.add(entry("127.0.0.2"));
  excludedNodes.add(entry("127.0.0.1:12346"));
  excludedNodes.add(entry("127.0.30.1:12346"));
  Assert.assertEquals(2, includedNodes.size());
  Assert.assertEquals(2, excludedNodes.size());
  doReturn(includedNodes).when(hm).getIncludes();
  doReturn(excludedNodes).when(hm).getExcludes();
  DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
  Whitebox.setInternalState(dm, "hostFileManager", hm);
  // Restored the generic type that was missing from the declaration/cast.
  Map<String, DatanodeDescriptor> dnMap = (Map<String, DatanodeDescriptor>)
      Whitebox.getInternalState(dm, "datanodeMap");
  // With no datanode registered, both includes appear as DEAD entries.
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL).size());
  Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  // Registering each included node removes its DEAD placeholder.
  dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
      "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
      "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  // A registered-but-stale included node (lastUpdate = 0) counts as DEAD...
  DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0.3",
      "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
  spam.setLastUpdate(0);
  includedNodes.add(entry("127.0.0.3:12345"));
  dnMap.put("uuid-spam", spam);
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  // ...stays DEAD after removal while still included...
  dnMap.remove("uuid-spam");
  Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
  // ...until it is excluded, which drops it from the report.
  excludedNodes.add(entry("127.0.0.3"));
  Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD).size());
}

InternalCallVerifier EqualityVerifier 
/**
 * A HostSet must de-duplicate entries that resolve to the same address and
 * port, while keeping a different port, a port-less entry, and a different
 * address as distinct members.
 */
@Test
public void testDeduplication() {
  HostFileManager.HostSet hostSet = new HostFileManager.HostSet();
  // "localhost" resolves to 127.0.0.1, so these two collapse into one.
  hostSet.add(entry("127.0.0.1:12345"));
  hostSet.add(entry("localhost:12345"));
  Assert.assertEquals(1, hostSet.size());
  // Re-adding an identical entry is a no-op.
  hostSet.add(entry("127.0.0.1:12345"));
  Assert.assertEquals(1, hostSet.size());
  // Same address, different port: distinct.
  hostSet.add(entry("127.0.0.1:12346"));
  Assert.assertEquals(2, hostSet.size());
  // A port-less entry does not merge with port-qualified ones.
  hostSet.add(entry("127.0.0.1"));
  Assert.assertEquals(3, hostSet.size());
  // A different IP is distinct even though it shares a textual prefix.
  hostSet.add(entry("127.0.0.10"));
  Assert.assertEquals(4, hostSet.size());
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestOverReplicatedBlocks

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test processOverReplicatedBlock can handle corrupt replicas fine.
 * It makes sure that it won't treat corrupt replicas as valid ones,
 * thus preventing the NN from deleting valid replicas while keeping
 * corrupt ones.
 */
@Test
public void testProcesOverReplicateBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Frequent block reports and a short pending-replication timeout keep the
  // test's waits short.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);
    // Corrupt the on-disk replica on datanode 0, then bounce that datanode
    // so the corruption gets detected.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // Delete the previous block-verification log so the restarted datanode
    // rescans; retry up to a minute since the file may still be held open.
    File scanLog = new File(MiniDFSCluster.getFinalizedDir(cluster.getInstanceStorageDir(0, 0),
        cluster.getNamesystem().getBlockPoolId()).getParent().toString()
        + "/../dncp_block_verification.log.prev");
    for (int i = 0; !scanLog.delete(); i++) {
      assertTrue("Could not delete log file in one minute", i < 60);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
      }
    }
    cluster.restartDataNode(dnProps);
    // Only the two uncorrupted replicas should count toward replication.
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    String blockPoolId = cluster.getNamesystem().getBlockPoolId();
    // NOTE(review): index 2 is assumed to be the datanode holding the
    // corrupt replica after the restart reorders the datanode list — confirm.
    final DatanodeID corruptDataNode = DataNodeTestUtils.getDNRegistrationForBP(
        cluster.getDataNodes().get(2), blockPoolId);
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
    try {
      namesystem.writeLock();
      synchronized (hm) {
        // Report every node except the corrupt one as completely full;
        // NOTE(review): this appears intended to bias excess-replica
        // deletion toward the healthy (full) nodes — confirm.
        String corruptMachineName = corruptDataNode.getXferAddr();
        for (DatanodeDescriptor datanode : hm.getDatanodes()) {
          if (!corruptMachineName.equals(datanode.getXferAddr())) {
            datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
            datanode.updateHeartbeat(
                BlockManagerTestUtil.getStorageReportsForDatanode(datanode), 0L, 0L, 0, 0);
          }
        }
        // Drop replication to 1; exactly one live (i.e. non-corrupt)
        // replica must remain.
        NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short) 1);
        assertEquals(1, bm.countNodes(block.getLocalBlock()).liveReplicas());
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * When replication of a file whose last block is still being written is
 * decreased, the now over-replicated replica must be invalidated, leaving
 * exactly one live replica.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path file = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    // Write and hsync a partial block at replication 2, then lower the
    // replication to 1 while the stream is still open.
    FSDataOutputStream out = fs.create(file, (short) 2);
    out.writeBytes("HDFS-3119: " + file);
    out.hsync();
    fs.setReplication(file, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
    assertEquals("Expected only one live replica for the block", 1,
        blockManager.countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * The test verifies that replica for deletion is chosen on a node, * with the oldest heartbeat, when this heartbeat is larger than the * tolerable heartbeat interval. * It creates a file with several blocks and replication 4. * The last DN is configured to send heartbeats rarely. * Test waits until the tolerable heartbeat interval expires, and reduces * replication of the file. All replica deletions should be scheduled for the * last node. No replicas will actually be deleted, since last DN doesn't * send heartbeats. */ @Test public void testChooseReplicaToDelete() throws Exception { MiniDFSCluster cluster=null; FileSystem fs=null; try { Configuration conf=new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,SMALL_BLOCK_SIZE); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); fs=cluster.getFileSystem(); final FSNamesystem namesystem=cluster.getNamesystem(); conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,300); cluster.startDataNodes(conf,1,true,null,null,null); DataNode lastDN=cluster.getDataNodes().get(3); DatanodeRegistration dnReg=DataNodeTestUtils.getDNRegistrationForBP(lastDN,namesystem.getBlockPoolId()); String lastDNid=dnReg.getDatanodeUuid(); final Path fileName=new Path("/foo2"); DFSTestUtil.createFile(fs,fileName,SMALL_FILE_LENGTH,(short)4,0L); DFSTestUtil.waitReplication(fs,fileName,(short)4); DatanodeDescriptor nodeInfo=null; long lastHeartbeat=0; long waitTime=DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 * (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1); do { nodeInfo=namesystem.getBlockManager().getDatanodeManager().getDatanode(dnReg); lastHeartbeat=nodeInfo.getLastUpdate(); } while (now() - lastHeartbeat < waitTime); fs.setReplication(fileName,(short)3); BlockLocation locs[]=fs.getFileBlockLocations(fs.getFileStatus(fileName),0,Long.MAX_VALUE); namesystem.readLock(); Collection dnBlocks=namesystem.getBlockManager().excessReplicateMap.get(lastDNid); assertEquals("Replicas on 
node " + lastDNid + " should have been deleted",SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE,dnBlocks.size()); namesystem.readUnlock(); for ( BlockLocation location : locs) assertEquals("Block should still have 4 replicas",4,location.getNames().length); } finally { if (fs != null) fs.close(); if (cluster != null) cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestPendingDataNodeMessages

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Enqueued block reports are keyed by block ID (not by generation stamp or
 * object identity): taking the queue via a different Block instance with a
 * different genstamp drains both queued reports, and a taken queue is gone.
 */
@Test
public void testQueues() {
  DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
  DatanodeStorage storage = new DatanodeStorage("STORAGE_ID");
  DatanodeStorageInfo storageInfo = new DatanodeStorageInfo(fakeDN, storage);
  msgs.enqueueReportedBlock(storageInfo, block1Gs1, ReplicaState.FINALIZED);
  msgs.enqueueReportedBlock(storageInfo, block1Gs2, ReplicaState.FINALIZED);
  assertEquals(2, msgs.count());
  // A different block ID matches nothing and removes nothing.
  assertNull(msgs.takeBlockQueue(block2Gs1));
  assertEquals(2, msgs.count());
  // Restored the element type that was missing from the declaration.
  Queue<ReportedBlockInfo> q = msgs.takeBlockQueue(block1Gs2DifferentInstance);
  assertEquals("ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," + "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
      Joiner.on(",").join(q));
  // Taking the queue removes it; a second take finds nothing.
  assertEquals(0, msgs.count());
  assertNull(msgs.takeBlockQueue(block1Gs1));
  assertEquals(0, msgs.count());
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestPendingInvalidateBlock

InternalCallVerifier EqualityVerifier 
/**
 * After a NameNode restart, deleting a file leaves its replicas counted as
 * pending deletion until the startup invalidation delay elapses; afterwards
 * the pending count drains to zero.
 */
@Test
public void testPendingDeletion() throws Exception {
  final Path file = new Path("/foo");
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE, REPLICATION, 0);
  // Restart so the delete below lands inside the startup delay window.
  cluster.restartNameNode(true);
  dfs.delete(file, true);
  // The block is gone from the namespace but its replicas are only queued.
  Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(REPLICATION, cluster.getNamesystem().getPendingDeletionBlocks());
  // Wait out the delay; the queued deletions should now be issued.
  Thread.sleep(6000);
  Assert.assertEquals(0, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test whether we can delay the deletion of unknown blocks in DataNode's
 * first several block reports.
 */
@Test
public void testPendingDeleteUnknownBlocks() throws Exception {
  final int fileNum = 5;
  final Path[] files = new Path[fileNum];
  final DataNodeProperties[] dnprops = new DataNodeProperties[REPLICATION];
  // Create 5 single-block files.
  for (int i = 0; i < fileNum; i++) {
    files[i] = new Path("/file" + i);
    DFSTestUtil.createFile(dfs, files[i], BLOCKSIZE, REPLICATION, i);
  }
  waitForReplication();
  // Stop every datanode, then delete two files while they are down, so the
  // datanodes still hold replicas of blocks the NameNode no longer knows.
  for (int i = REPLICATION - 1; i >= 0; i--) {
    dnprops[i] = cluster.stopDataNode(i);
  }
  Thread.sleep(2000);
  for (int i = 0; i < 2; i++) {
    dfs.delete(files[i], true);
  }
  // Restart the NameNode and shrink the invalidation delay to 1s via a spy
  // so the delayed deletions become observable quickly.
  cluster.restartNameNode(false);
  InvalidateBlocks invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(
      cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
  InvalidateBlocks mockIb = Mockito.spy(invalidateBlocks);
  Mockito.doReturn(1L).when(mockIb).getInvalidationDelay();
  Whitebox.setInternalState(cluster.getNamesystem().getBlockManager(), "invalidateBlocks", mockIb);
  Assert.assertEquals(0L, cluster.getNamesystem().getPendingDeletionBlocks());
  // Bring the datanodes back; their block reports now contain the deleted
  // files' blocks, which are queued for deletion rather than removed
  // immediately.
  for (int i = 0; i < REPLICATION; i++) {
    cluster.restartDataNode(dnprops[i], true);
  }
  cluster.waitActive();
  for (int i = 0; i < REPLICATION; i++) {
    DataNodeTestUtils.triggerBlockReport(cluster.getDataNodes().get(i));
  }
  Thread.sleep(2000);
  // 3 files remain; the unknown replicas of the 2 deleted files are pending.
  Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(4, cluster.getNamesystem().getPendingDeletionBlocks());
  // After another restart and the (6s) startup delay, the pending deletions
  // are issued and drain to zero while the live blocks are untouched.
  cluster.restartNameNode(true);
  Thread.sleep(6000);
  Assert.assertEquals(3, cluster.getNamesystem().getBlocksTotal());
  Assert.assertEquals(0, cluster.getNamesystem().getPendingDeletionBlocks());
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestPendingReplication

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
 * pending replications. Also make sure the blockReceivedAndDeleted call is
 * idempotent to the pending replications.
 */
@Test
public void testBlockReceived() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    BlockManager blkManager = fsn.getBlockManager();
    final String file = "/tmp.txt";
    final Path filePath = new Path(file);
    short replFactor = 1;
    DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
    // Disable heartbeats so the pending-replication state is not drained
    // behind the test's back. (Restored the element type that was missing
    // from the declaration.)
    ArrayList<DataNode> datanodes = cluster.getDataNodes();
    for (int i = 0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
    }
    // Raise replication; computing work queues DATANODE_COUNT - 1 pending
    // replicas for the file's single block.
    hdfs.setReplication(filePath, (short) DATANODE_COUNT);
    BlockManagerTestUtil.computeAllPendingWork(blkManager);
    assertEquals(1, blkManager.pendingReplications.size());
    INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
    Block[] blocks = fileNode.getBlocks();
    assertEquals(DATANODE_COUNT - 1,
        blkManager.pendingReplications.getNumReplicas(blocks[0]));
    LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0).get(0);
    DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
    // Two datanodes (other than the one already holding the block) report
    // the block as received.
    int reportDnNum = 0;
    String poolId = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report = {
            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                new ReceivedDeletedBlockInfo[]{new ReceivedDeletedBlockInfo(
                    blocks[0], BlockStatus.RECEIVED_BLOCK, "")})};
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
        reportDnNum++;
      }
    }
    assertEquals(DATANODE_COUNT - 3,
        blkManager.pendingReplications.getNumReplicas(blocks[0]));
    // Fixed: reset the counter so this second loop actually re-sends the
    // same reports; previously reportDnNum was still 2 here, the loop body
    // never ran, and the idempotency claim went untested.
    reportDnNum = 0;
    for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
      if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
        DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
        StorageReceivedDeletedBlocks[] report = {
            new StorageReceivedDeletedBlocks("Fake-storage-ID-Ignored",
                new ReceivedDeletedBlockInfo[]{new ReceivedDeletedBlockInfo(
                    blocks[0], BlockStatus.RECEIVED_BLOCK, "")})};
        cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
        reportDnNum++;
      }
    }
    // Duplicate reports must not decrement the pending count any further.
    assertEquals(DATANODE_COUNT - 3,
        blkManager.pendingReplications.getNumReplicas(blocks[0]));
    // Re-enable heartbeats and let replication complete; the pending set
    // should drain to empty.
    for (int i = 0; i < DATANODE_COUNT; i++) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), false);
      DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
    }
    Thread.sleep(5000);
    assertEquals(0, blkManager.pendingReplications.size());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  // Short heartbeat and replication intervals so replication work is
  // computed promptly.
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
  cluster.waitActive();
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // Create a single-block file at replication 3.
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    // Disable heartbeats so datanodes cannot pick up the replication work
    // while the pending state is inspected.
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    // Mark two of the three replicas corrupt; this schedules
    // re-replication for the block.
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0], "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1], "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    // NOTE(review): these assertEquals calls pass (actual, expected),
    // reversed from the JUnit convention — only failure messages are
    // affected, not the pass/fail outcome.
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock().getLocalBlock()), 2);
    // Deleting the file must remove its pending-replication records; poll
    // for up to 10 seconds.
    fs.delete(filePath, true);
    int retries = 10;
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises PendingReplicationBlocks: incrementing/decrementing pending
 * replica counts, re-insertion after a record is fully decremented away,
 * and the timeout sweep that moves entries to the timed-out list.
 */
@Test
public void testPendingReplication() {
  PendingReplicationBlocks pendingReplications;
  pendingReplications = new PendingReplicationBlocks(TIMEOUT * 1000);
  pendingReplications.start();
  // Add 10 blocks; block i is pending on i target storages.
  DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(10);
  for (int i = 0; i < storages.length; i++) {
    Block block = new Block(i, i, 0);
    DatanodeStorageInfo[] targets = new DatanodeStorageInfo[i];
    System.arraycopy(storages, 0, targets, 0, i);
    pendingReplications.increment(block,
        DatanodeStorageInfo.toDatanodeDescriptors(targets));
  }
  assertEquals("Size of pendingReplications ", 10, pendingReplications.size());
  // Decrement block 8 once; 7 pending replicas should remain.
  Block blk = new Block(8, 8, 0);
  pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor());
  assertEquals("pendingReplications.getNumReplicas ", 7,
      pendingReplications.getNumReplicas(blk));
  // Decrementing the remaining 7 removes the record entirely.
  for (int i = 0; i < 7; i++) {
    pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());
  }
  // assertEquals instead of assertTrue(a == b): failure messages then show
  // expected vs. actual values.
  assertEquals(9, pendingReplications.size());
  // Re-insert block 8 with 8 fresh targets.
  pendingReplications.increment(blk, DatanodeStorageInfo
      .toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(8)));
  assertEquals(10, pendingReplications.size());
  // Verify each block still reports the pending count it was created with.
  for (int i = 0; i < 10; i++) {
    Block block = new Block(i, i, 0);
    int numReplicas = pendingReplications.getNumReplicas(block);
    assertEquals(i, numReplicas);
  }
  // Nothing should have timed out yet.
  assertTrue(pendingReplications.getTimedOutBlocks() == null);
  try {
    Thread.sleep(1000);
  } catch (Exception e) {
    // best-effort sleep; interruption is deliberately ignored in this test
  }
  // Add 5 more blocks part-way through the timeout window.
  for (int i = 10; i < 15; i++) {
    Block block = new Block(i, i, 0);
    pendingReplications.increment(block, DatanodeStorageInfo
        .toDatanodeDescriptors(DFSTestUtil.createDatanodeStorageInfos(i)));
  }
  assertEquals(15, pendingReplications.size());
  // Wait for all entries to time out.
  // NOTE(review): this wait is unbounded; it relies on the
  // PendingReplicationBlocks timeout thread making progress.
  int loop = 0;
  while (pendingReplications.size() > 0) {
    try {
      Thread.sleep(1000);
    } catch (Exception e) {
      // ignored; the loop re-checks the condition
    }
    loop++;
  }
  System.out.println("Had to wait for " + loop
      + " seconds for the lot to timeout");
  // Everything should now be in the timed-out list.
  assertEquals("Size of pendingReplications ", 0, pendingReplications.size());
  Block[] timedOut = pendingReplications.getTimedOutBlocks();
  assertTrue(timedOut != null && timedOut.length == 15);
  for (int i = 0; i < timedOut.length; i++) {
    assertTrue(timedOut[i].getBlockId() < 15);
  }
  pendingReplications.stop();
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestRBWBlockInvalidation

APIUtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test when a block's replica is removed from RBW folder in one of the
 * datanode, namenode should ask to invalidate that corrupted block and
 * schedule replication for one more replica for that under replicated block.
 */
@Test(timeout=600000)
public void testBlockInvalidationWhenRBWReplicaMissedInDN() throws IOException, InterruptedException {
  // Direct deletion of the RBW block/meta files below is not possible on
  // Windows, so skip there.
  assumeTrue(!Path.WINDOWS);
  Configuration conf=new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY,2);
  // Short report/scan/heartbeat intervals so the corruption is noticed
  // quickly by the polling loops below.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,300);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,1);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FSDataOutputStream out=null;
  try {
    final FSNamesystem namesystem=cluster.getNamesystem();
    FileSystem fs=cluster.getFileSystem();
    // Create a file with replication 2 and keep it open (block stays RBW).
    Path testPath=new Path("/tmp/TestRBWBlockInvalidation","foo1");
    out=fs.create(testPath,(short)2);
    out.writeBytes("HDFS-3157: " + testPath);
    out.hsync();
    // A third DN gives the NN somewhere to re-replicate after invalidation.
    cluster.startDataNodes(conf,1,true,null,null,null);
    String bpid=namesystem.getBlockPoolId();
    ExtendedBlock blk=DFSTestUtil.getFirstBlock(fs,testPath);
    Block block=blk.getLocalBlock();
    DataNode dn=cluster.getDataNodes().get(0);
    // Delete the replica's block and meta files straight out of DN 0's RBW
    // folder to simulate a lost in-progress replica.
    File blockFile=DataNodeTestUtils.getBlockFile(dn,bpid,block);
    File metaFile=DataNodeTestUtils.getMetaFile(dn,bpid,block);
    assertTrue("Could not delete the block file from the RBW folder",blockFile.delete());
    assertTrue("Could not delete the block meta file from the RBW folder",metaFile.delete());
    // Close the file so the block is finalized and reported.
    out.close();
    // Poll until the NN notices the corruption (live replicas drop below 2).
    int liveReplicas=0;
    while (true) {
      if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) < 2) {
        LOG.info("Live Replicas after corruption: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be less than 2 replicas in the " + "liveReplicasMap",1,liveReplicas);
    // Poll until re-replication restores the second live replica.
    while (true) {
      if ((liveReplicas=countReplicas(namesystem,blk).liveReplicas()) > 1) {
        LOG.info("Live Replicas after Rereplication: " + liveReplicas);
        break;
      }
      Thread.sleep(100);
    }
    assertEquals("There should be two live replicas",2,liveReplicas);
    // Finally wait until the corrupt replica has been invalidated.
    while (true) {
      Thread.sleep(100);
      if (countReplicas(namesystem,blk).corruptReplicas() == 0) {
        LOG.info("Corrupt Replicas becomes 0");
        break;
      }
    }
  } finally {
    if (out != null) {
      out.close();
    }
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Regression test for HDFS-4799: upon restart, if there were RWR replicas
 * with out-of-date genstamps, the NN could accidentally delete good
 * replicas instead of the bad replicas.
 */
@Test(timeout=60000)
public void testRWRInvalidation() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Use a randomized deleter policy so the test does not depend on the
  // default disk-space-based choice of replica to delete.
  conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      RandomDeleterPolicy.class, BlockPlacementPolicy.class);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  // Generic type parameters restored: the raw List here made the for-each
  // over Path elements and the get(i) assignment below fail to compile.
  List<Path> testPaths = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    testPaths.add(new Path("/test" + i));
  }
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    List<FSDataOutputStream> streams = Lists.newArrayList();
    try {
      // Open each test file and write first-generation data.
      for (Path path : testPaths) {
        FSDataOutputStream out =
            cluster.getFileSystem().create(path, (short) 2);
        streams.add(out);
        out.writeBytes("old gs data\n");
        out.hflush();
      }
      // Stop one DN so it only ever sees the old generation stamp.
      DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);
      // Write second-generation data, drop replication to 1, and close.
      for (int i = 0; i < streams.size(); i++) {
        Path path = testPaths.get(i);
        FSDataOutputStream out = streams.get(i);
        out.writeBytes("new gs data\n");
        out.hflush();
        cluster.getFileSystem().setReplication(path, (short) 1);
        out.close();
      }
      LOG.info("=========================== restarting cluster");
      DataNodeProperties otherNode = cluster.stopDataNode(0);
      cluster.restartNameNode();
      // Restart the stale-genstamp node first, then the up-to-date one.
      cluster.restartDataNode(oldGenstampNode);
      cluster.waitActive();
      cluster.restartDataNode(otherNode);
      cluster.waitActive();
      // Compute and deliver invalidations, waiting until fully processed.
      cluster.getNameNode().getNamesystem().getBlockManager()
          .computeInvalidateWork(2);
      cluster.triggerHeartbeats();
      HATestUtil.waitForDNDeletions(cluster);
      cluster.triggerDeletionReports();
      // The surviving replica must hold both generations of data.
      for (Path path : testPaths) {
        String ret = DFSTestUtil.readFile(cluster.getFileSystem(), path);
        assertEquals("old gs data\n" + "new gs data\n", ret);
      }
    } finally {
      IOUtils.cleanup(LOG, streams.toArray(new Closeable[0]));
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicy

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack, and
 * the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception
 */
@Test
public void testChooseTarget5() throws Exception {
  DatanodeDescriptor writerDesc =
      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, writerDesc);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1, writerDesc);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, writerDesc);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, writerDesc);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
}

InternalCallVerifier EqualityVerifier 
/** * Test for the chooseReplicaToDelete are processed based on * block locality and free space */ @Test public void testChooseReplicaToDelete() throws Exception { List replicaList=new ArrayList(); final Map> rackMap=new HashMap>(); dataNodes[0].setRemaining(4 * 1024 * 1024); replicaList.add(storages[0]); dataNodes[1].setRemaining(3 * 1024 * 1024); replicaList.add(storages[1]); dataNodes[2].setRemaining(2 * 1024 * 1024); replicaList.add(storages[2]); dataNodes[5].setRemaining(1 * 1024 * 1024); replicaList.add(storages[5]); for (int i=0; i < dataNodes.length; i++) { dataNodes[i].setLastUpdate(Time.now()); } List first=new ArrayList(); List second=new ArrayList(); replicator.splitNodesWithRack(replicaList,rackMap,first,second); assertEquals(2,first.size()); assertEquals(2,second.size()); DatanodeStorageInfo chosen=replicator.chooseReplicaToDelete(null,null,(short)3,first,second); assertEquals(chosen,storages[1]); replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen); assertEquals(0,first.size()); assertEquals(3,second.size()); chosen=replicator.chooseReplicaToDelete(null,null,(short)2,first,second); assertEquals(chosen,storages[5]); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a stale datanode (dataNodes[0]) is avoided for writes while
 * stale-node avoidance is active, and that state is restored afterwards.
 */
@Test
public void testChooseTargetWithStaleNodes() throws Exception {
  // Mark dataNodes[0] stale and let the heartbeat manager notice it.
  dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .shouldAvoidStaleDataNodesForWrite());
  DatanodeStorageInfo[] targets;
  // The writer (dataNodes[0]) is stale, so placement skips to storages[1].
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);
  // Generic type parameters restored; element types match chooseTarget's
  // parameters elsewhere in this class.
  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  // Restore dataNodes[0] so later tests see a healthy cluster.
  dataNodes[0].setLastUpdate(Time.now());
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
}

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // Make dataNodes[0] ineligible: leave it with too little remaining space.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[1], targets[0]);
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // Restore dataNodes[0]'s capacity for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
 * and when the number of replicas is less or equal to 3, all the healthy
 * datanodes should be returned by the chooseTarget method. When the number
 * of replicas is 4, a stale node should be included.
 * @throws Exception
 */
@Test
public void testChooseTargetWithHalfStaleNodes() throws Exception {
  // Mark half the datanodes stale.
  for (int i = 0; i < 3; i++) {
    dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
  }
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
  DatanodeStorageInfo[] targets = chooseTarget(0);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
  // With 3 replicas, only the healthy nodes (3..5) may be chosen.
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
  // With 4 replicas, all healthy nodes are used plus one stale node.
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
  // Restore all heartbeats for subsequent tests.
  for (int i = 0; i < dataNodes.length; i++) {
    dataNodes[i].setLastUpdate(Time.now());
  }
  namenode.getNamesystem().getBlockManager().getDatanodeManager()
      .getHeartbeatManager().heartbeatCheck();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests that stale-node avoidance for writes switches off once more than
 * half of the datanodes are stale, and switches back on when enough nodes
 * recover.
 */
@Test
public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  String[] hosts =
      new String[]{"host1", "host2", "host3", "host4", "host5", "host6"};
  String[] racks =
      new String[]{"/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3"};
  MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks)
      .hosts(hosts).numDataNodes(hosts.length).build();
  miniCluster.waitActive();
  try {
    // Hoist the deeply nested accessor chain; every use resolved to the
    // same DatanodeManager instance.
    final DatanodeManager dm = miniCluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager();
    // Mark 2 of 6 datanodes stale (less than half).
    for (int i = 0; i < 2; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      dm.getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);
    }
    dm.getHeartbeatManager().heartbeatCheck();
    int numStaleNodes = dm.getNumStaleNodes();
    // Expected value first (JUnit assertEquals convention).
    assertEquals(2, numStaleNodes);
    assertTrue(dm.shouldAvoidStaleDataNodesForWrite());
    DatanodeDescriptor staleNodeInfo = dm.getDatanode(
        miniCluster.getDataNodes().get(0).getDatanodeId());
    BlockPlacementPolicy replicator = miniCluster.getNameNode()
        .getNamesystem().getBlockManager().getBlockPlacementPolicy();
    // Generic type parameter restored on the chosen-nodes list.
    DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 3,
        staleNodeInfo, new ArrayList<DatanodeStorageInfo>(), false, null,
        BLOCK_SIZE, StorageType.DEFAULT);
    assertEquals(3, targets.length);
    assertFalse(isOnSameRack(targets[0], staleNodeInfo));
    // Now make 4 of 6 datanodes stale: more than half, so avoidance
    // should turn off.
    for (int i = 0; i < 4; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      dm.getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);
    }
    dm.getHeartbeatManager().heartbeatCheck();
    numStaleNodes = dm.getNumStaleNodes();
    assertEquals(4, numStaleNodes);
    assertFalse(dm.shouldAvoidStaleDataNodesForWrite());
    targets = replicator.chooseTarget(filename, 3, staleNodeInfo,
        new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
        StorageType.DEFAULT);
    assertEquals(3, targets.length);
    // With avoidance off, the stale writer's own rack may be used again.
    assertTrue(isOnSameRack(targets[0], staleNodeInfo));
    // Revive two nodes: back to 2 stale, avoidance re-enabled.
    for (int i = 2; i < 4; i++) {
      DataNode dn = miniCluster.getDataNodes().get(i);
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
      dm.getDatanode(dn.getDatanodeId()).setLastUpdate(Time.now());
    }
    dm.getHeartbeatManager().heartbeatCheck();
    numStaleNodes = dm.getNumStaleNodes();
    assertEquals(2, numStaleNodes);
    assertTrue(dm.shouldAvoidStaleDataNodesForWrite());
    targets = chooseTarget(3, staleNodeInfo);
    assertEquals(3, targets.length);
    assertFalse(isOnSameRack(targets[0], staleNodeInfo));
  } finally {
    miniCluster.shutdown();
  }
}

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3,
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica.
 * NOTE(review): method name has a typo ("Chooose"); kept as-is to avoid
 * disturbing anything keyed to the test name.
 * @throws Exception
 */
@Test
public void testChoooseTarget4() throws Exception {
  // Make both rack-1 nodes ineligible via insufficient remaining space.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L,
        0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(targets[i], dataNodes[0]));
  }
  assertTrue(isOnSameRack(targets[0], targets[1])
      || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  // Restore rack-1 nodes' capacity for subsequent tests.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests whether the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when a non-positive value is retrieved.
 */
@Test
public void testGetReplWorkMultiplier() {
  Configuration conf = new Configuration();
  int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertTrue(blocksReplWorkMultiplier > 0);
  conf.set(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
      "3");
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(3, blocksReplWorkMultiplier);
  conf.set(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
      "-1");
  // The ExpectedException rule must be armed before the throwing call.
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node by same rack as
 * the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test
public void testRereplicate1() throws Exception {
  // Generic type parameter restored on the chosen-nodes list.
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a
 * different rack and the third should be placed on a different node
 * of the rack chosen for the 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test
public void testChooseTarget1() throws Exception {
  // Give dataNodes[0] 4 active xceivers so its load is non-zero but it is
  // still eligible as the local writer.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 4, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  // Reset dataNodes[0]'s xceiver count for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica, and the rest
 * should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
  // Generic type parameters restored on the excluded/chosen collections.
  Set<Node> excludedNodes;
  DatanodeStorageInfo[] targets;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(0, chosenNodes, excludedNodes);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(2, chosenNodes, excludedNodes);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(3, chosenNodes, excludedNodes);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  targets = chooseTarget(4, chosenNodes, excludedNodes);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameRack(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // With returnChosenNodes=true, the already-chosen storages[2] must be
  // included in the returned targets alongside the one new node.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes,
      true, excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // Verify that storages[2] is among the returned targets.
  int i = 0;
  for (; i < targets.length && !storages[2].equals(targets[i]); i++) {
    ;
  }
  assertTrue(i < targets.length);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[2] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate3() throws Exception {
  // Generic type parameter restored on the chosen-nodes list.
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[2]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[0], dataNodes[2]));
  // Same request, but with dataNodes[2] as the writer.
  targets = chooseTarget(1, dataNodes[2], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, dataNodes[2], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(targets[0], dataNodes[2]));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, it tries to choose more targets than available nodes and
 * checks both the returned targets and the warning that is logged.
 * @throws Exception
 */
@Test
public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
  // Make 2 of the datanodes ineligible via insufficient remaining space.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L,
        0, 0);
  }
  // Capture log output so the shortfall warning can be inspected.
  final LogVerificationAppender appender = new LogVerificationAppender();
  final Logger logger = Logger.getRootLogger();
  logger.addAppender(appender);
  // Ask for one target per datanode; only NUM_OF_DATANODES - 2 can serve.
  DatanodeStorageInfo[] targets = chooseTarget(NUM_OF_DATANODES);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(NUM_OF_DATANODES - 2, targets.length);
  // Generic type parameter restored on the log-event list.
  final List<LoggingEvent> log = appender.getLog();
  assertNotNull(log);
  assertFalse(log.size() == 0);
  final LoggingEvent lastLogEntry = log.get(log.size() - 1);
  assertTrue(Level.WARN.isGreaterOrEqual(lastLogEntry.getLevel()));
  // The warning should mention the 2 targets that could not be found.
  assertTrue(((String) lastLogEntry.getMessage()).contains("in need of 2"));
  // Restore the two nodes' capacity for subsequent tests.
  for (int i = 0; i < 2; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests whether the default value returned by
 * DFSUtil.getInvalidateWorkPctPerIteration() is positive,
 * and whether an IllegalArgumentException will be thrown
 * when 0.0f is retrieved.
 */
@Test
public void testGetInvalidateWorkPctPerIteration() {
  Configuration conf = new Configuration();
  float blocksInvalidateWorkPct =
      DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(blocksInvalidateWorkPct > 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "0.5f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  // Expected value first (JUnit assertEquals(expected, actual, delta)).
  assertEquals(0.5f, blocksInvalidateWorkPct,
      blocksInvalidateWorkPct * 1e-7);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "1.0f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f, blocksInvalidateWorkPct,
      blocksInvalidateWorkPct * 1e-7);
  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "0.0f");
  // The ExpectedException rule must be armed before the throwing call.
  exception.expect(IllegalArgumentException.class);
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack than rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate2() throws Exception {
  // Generic type parameter restored on the chosen-nodes list.
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  // Expected value first (JUnit assertEquals convention).
  assertEquals(0, targets.length);
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  assertFalse(isOnSameRack(targets[1], dataNodes[0]));
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyConsiderLoad

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that chooseTarget with considerLoad set to true correctly calculates
 * load with decommissioned nodes.
 */
@Test
public void testChooseTargetWithDecomNodes() throws IOException {
  namenode.getNamesystem().writeLock();
  try {
    String blockPoolId = namenode.getNamesystem().getBlockPoolId();
    // Give nodes 3..5 known xceiver counts via heartbeats.
    dnManager.handleHeartbeat(dnrList.get(3),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
        blockPoolId, dataNodes[3].getCacheCapacity(),
        dataNodes[3].getCacheRemaining(), 2, 0, 0);
    dnManager.handleHeartbeat(dnrList.get(4),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[4]),
        blockPoolId, dataNodes[4].getCacheCapacity(),
        dataNodes[4].getCacheRemaining(), 4, 0, 0);
    dnManager.handleHeartbeat(dnrList.get(5),
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[5]),
        blockPoolId, dataNodes[5].getCacheCapacity(),
        dataNodes[5].getCacheRemaining(), 4, 0, 0);
    final int load = 2 + 4 + 4;
    FSNamesystem fsn = namenode.getNamesystem();
    // All 6 nodes in service: average load is total / 6.
    assertEquals((double) load / 6, fsn.getInServiceXceiverAverage(), EPSILON);
    // Decommission nodes 0..2; the average should only count the 3
    // remaining in-service nodes.
    for (int i = 0; i < 3; i++) {
      DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
      dnManager.startDecommission(d);
      d.setDecommissioned();
    }
    assertEquals((double) load / 3, fsn.getInServiceXceiverAverage(), EPSILON);
    // Generic type parameter restored on the chosen-nodes list.
    DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
            dataNodes[0], new ArrayList<DatanodeStorageInfo>(), false, null,
            1024, StorageType.DEFAULT);
    assertEquals(3, targets.length);
    // All chosen targets must be on the non-decommissioned nodes (3..5).
    Set<DatanodeStorageInfo> targetSet =
        new HashSet<DatanodeStorageInfo>(Arrays.asList(targets));
    for (int i = 3; i < storages.length; i++) {
      assertTrue(targetSet.contains(storages[i]));
    }
  } finally {
    dataNodes[0].stopDecommission();
    dataNodes[1].stopDecommission();
    dataNodes[2].stopDecommission();
    namenode.getNamesystem().writeUnlock();
  }
  NameNode.LOG.info("Done working on it");
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestReplicationPolicyWithNodeGroup

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests target choice when nodes declare host dependencies on each other:
 * the dependent hosts of a chosen node must be excluded as well.
 */
@Test
public void testChooseTargetWithDependencies() throws Exception {
  // Remove the default and "more targets" topologies before installing the
  // dependency-specific one.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    DatanodeDescriptor node = dataNodesInMoreTargetsCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  Host2NodesMap host2DatanodeMap = namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().getHost2DatanodeMap();
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    cluster.add(dataNodesForDependencies[i]);
    host2DatanodeMap.add(dataNodesForDependencies[i]);
  }
  // Nodes 1<->2 and 3<->4 depend on each other.
  dataNodesForDependencies[1].addDependentHostName(
      dataNodesForDependencies[2].getHostName());
  dataNodesForDependencies[2].addDependentHostName(
      dataNodesForDependencies[1].getHostName());
  dataNodesForDependencies[3].addDependentHostName(
      dataNodesForDependencies[4].getHostName());
  dataNodesForDependencies[4].addDependentHostName(
      dataNodesForDependencies[3].getHostName());
  // Give every node enough free space to qualify as a target.
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    updateHeartbeatWithUsage(dataNodesForDependencies[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  DatanodeStorageInfo[] targets;
  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodesForDependencies[5]);
  // Writer is node 1; node 5 is excluded up front and node 2 is barred by
  // its dependency on node 1, so only 2 of the 3 requested targets exist.
  targets = chooseTarget(3, dataNodesForDependencies[1], chosenNodes,
      excludedNodes);
  assertEquals(2, targets.length);
  assertEquals(storagesForDependencies[1], targets[0]);
  assertTrue(targets[1].equals(storagesForDependencies[3])
      || targets[1].equals(storagesForDependencies[4]));
  // Every node ends up excluded, either explicitly or via a dependency.
  assertEquals(NUM_OF_DATANODES_FOR_DEPENDENCIES, excludedNodes.size());
  for (int i = 0; i < NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
    assertTrue(excludedNodes.contains(dataNodesForDependencies[i]));
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[3] are already chosen.
 * So the 1st replica should be placed on the rack that the writer resides.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate3() throws Exception {
  setupDataNodeCapacity();
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[3]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // Writer defaults to dataNodes[0]: new replica stays on its rack but
  // off dataNodes[3]'s rack.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(dataNodes[3], targets[0]));
  // Writer dataNodes[3]: replica on its rack, different node group,
  // off dataNodes[0]'s rack.
  targets = chooseTarget(1, dataNodes[3], chosenNodes);
  assertEquals(1, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[3], targets[0]));
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  targets = chooseTarget(2, dataNodes[3], chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[3], targets[0]));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication,
 * when dataNodes[0] and dataNodes[1] are already chosen.
 * So the 1st replica should be placed on a different rack of rack 1.
 * the rest replicas can be placed randomly,
 * @throws Exception
 */
@Test
public void testRereplicate2() throws Exception {
  setupDataNodeCapacity();
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  chosenNodes.add(storages[1]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // One extra replica must leave rack 1 (both existing replicas are there).
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  // With two extra replicas, not both may land back on rack 1.
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0])
      && isOnSameRack(dataNodes[0], targets[1]));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This testcase tests re-replication, when dataNodes[0] is already chosen.
 * So the 1st replica can be placed on random rack.
 * the 2nd replica should be placed on different node and nodegroup by same
 * rack as the 1st replica. The 3rd replica can be placed randomly.
 * @throws Exception
 */
@Test
public void testRereplicate1() throws Exception {
  setupDataNodeCapacity();
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  chosenNodes.add(storages[0]);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, chosenNodes);
  assertEquals(0, targets.length);
  // A single extra replica must go off dataNodes[0]'s rack.
  targets = chooseTarget(1, chosenNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  // Two extras: first on writer's rack (different node group implied by
  // policy), second on a different rack.
  targets = chooseTarget(2, chosenNodes);
  assertEquals(2, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, chosenNodes);
  assertEquals(3, targets.length);
  assertTrue(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameNodeGroup(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test replica placement policy in case of boundary topology.
 * Rack 2 has only 1 node group and can't host two replicas.
 * The 1st replica will be placed on writer.
 * The 2nd replica should be placed on a different rack.
 * The 3rd replica should be placed on the same rack with writer, but on a
 * different node group.
 */
@Test
public void testChooseTargetsOnBoundaryTopology() throws Exception {
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    cluster.add(dataNodesInBoundaryCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    // NOTE(review): this dataNodes[0] heartbeat uses identical arguments on
    // every iteration and looks loop-invariant — confirm before hoisting.
    updateHeartbeatWithUsage(dataNodes[0],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,
        0L, 0L, 0L, 0, 0);
    updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
        0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
  assertEquals(3, targets.length);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is a node outside of the file system.
 * So the 1st replica can be placed on any node.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica,
 * @throws Exception
 */
@Test
public void testChooseTarget5() throws Exception {
  setupDataNodeCapacity();
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0, NODE);
  assertEquals(0, targets.length);
  targets = chooseTarget(1, NODE);
  assertEquals(1, targets.length);
  targets = chooseTarget(2, NODE);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3, NODE);
  assertEquals(3, targets.length);
  // 2nd and 3rd replicas share a rack; 1st is on a different one.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);
}

InternalCallVerifier EqualityVerifier 
/** * Test for the chooseReplicaToDelete are processed based on * block locality and free space */ @Test public void testChooseReplicaToDelete() throws Exception { List replicaList=new ArrayList(); final Map> rackMap=new HashMap>(); dataNodes[0].setRemaining(4 * 1024 * 1024); replicaList.add(storages[0]); dataNodes[1].setRemaining(3 * 1024 * 1024); replicaList.add(storages[1]); dataNodes[2].setRemaining(2 * 1024 * 1024); replicaList.add(storages[2]); dataNodes[5].setRemaining(1 * 1024 * 1024); replicaList.add(storages[5]); List first=new ArrayList(); List second=new ArrayList(); replicator.splitNodesWithRack(replicaList,rackMap,first,second); assertEquals(3,first.size()); assertEquals(1,second.size()); DatanodeStorageInfo chosen=replicator.chooseReplicaToDelete(null,null,(short)3,first,second); assertEquals(chosen,storages[1]); replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen); assertEquals(2,first.size()); assertEquals(1,second.size()); chosen=replicator.chooseReplicaToDelete(null,null,(short)2,first,second); assertEquals(chosen,storages[2]); replicator.adjustSetsWithChosenReplica(rackMap,first,second,chosen); assertEquals(0,first.size()); assertEquals(2,second.size()); chosen=replicator.chooseReplicaToDelete(null,null,(short)1,first,second); assertEquals(chosen,storages[5]); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but the dataNodes[1] is
 * not allowed to be chosen. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on a different
 * rack, the 3rd should be on same rack as the 2nd replica but in different
 * node group, and the rest should be placed on a third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget2() throws Exception {
  DatanodeStorageInfo[] targets;
  BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault) replicator;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  // No additional replica may share the writer's node group.
  for (int i = 1; i < 4; i++) {
    assertFalse(isOnSameNodeGroup(targets[0], targets[i]));
  }
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[1], targets[3]));
  // Re-replication with storages[2] already chosen: the returned array
  // must include it along with the newly chosen target.
  excludedNodes.clear();
  chosenNodes.clear();
  excludedNodes.add(dataNodes[1]);
  chosenNodes.add(storages[2]);
  targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
      excludedNodes, BLOCK_SIZE, StorageType.DEFAULT);
  System.out.println("targets=" + Arrays.asList(targets));
  assertEquals(2, targets.length);
  // Scan for storages[2] among the returned targets.
  int i = 0;
  while (i < targets.length && !storages[2].equals(targets[i])) {
    i++;
  }
  assertTrue(i < targets.length);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but dataNodes[0] is not qualified
 * to be chosen. So the 1st replica should be placed on dataNodes[1],
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 2nd replica but in
 * a different nodegroup, and the rest should be placed on the third rack.
 * @throws Exception
 */
@Test
public void testChooseTarget3() throws Exception {
  // Leave dataNodes[0] with too little remaining space to qualify.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[1], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[1], targets[0]);
  assertTrue(cluster.isNodeGroupAware());
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  // Restore dataNodes[0] to a writable state for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0], but none of the nodes on rack 1
 * is qualified to be chosen. So the 1st replica should be placed on either
 * rack 2 or rack 3.
 * the 2nd replica should be placed on a different rack,
 * the 3rd replica should be placed on the same rack as the 1st replica, but
 * in different node group.
 * @throws Exception
 */
@Test
public void testChooseTarget4() throws Exception {
  // Disqualify all of rack 1 (nodes 0..2) with low remaining space.
  for (int i = 0; i < 3; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        (HdfsConstants.MIN_BLOCKS_FOR_WRITE - 1) * BLOCK_SIZE,
        0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(isOnSameRack(dataNodes[0], targets[0]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  // No target may land on the writer's (disqualified) rack.
  for (int i = 0; i < 3; i++) {
    assertFalse(isOnSameRack(dataNodes[0], targets[i]));
  }
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  assertTrue(isOnSameRack(targets[0], targets[1])
      || isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test replica placement policy in case of targets more than number of
 * NodeGroups.
 * The 12-nodes cluster only has 6 NodeGroups, but in some cases, like:
 * placing submitted job file, there is requirement to choose more (10)
 * targets for placing replica. We should test it can return 6 targets.
 */
@Test
public void testChooseMoreTargetsThanNodeGroups() throws Exception {
  // Swap in the "more targets" topology (6 node groups, 12 nodes).
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.remove(dataNodes[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_BOUNDARY; i++) {
    DatanodeDescriptor node = dataNodesInBoundaryCase[i];
    if (cluster.contains(node)) {
      cluster.remove(node);
    }
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    cluster.add(dataNodesInMoreTargetsCase[i]);
  }
  for (int i = 0; i < NUM_OF_DATANODES_MORE_TARGETS; i++) {
    updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
        2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE,
        0L, 0L, 0L, 0, 0);
  }
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
  assertEquals(3, targets.length);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  // Asking for 10 targets can yield at most one per node group, i.e. 6.
  targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
  assertTrue(checkTargetsOnDifferentNodeGroup(targets));
  assertEquals(6, targets.length);
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * In this testcase, client is dataNodes[0]. So the 1st replica should be
 * placed on dataNodes[0], the 2nd replica should be placed on
 * different rack and third should be placed on different node (and node
 * group) of rack chosen for 2nd node.
 * The only exception is when the numOfReplicas is 2,
 * the 1st is on dataNodes[0] and the 2nd is on a different rack.
 * @throws Exception
 */
@Test
public void testChooseTarget1() throws Exception {
  // Give dataNodes[0] load (4 xceivers) but enough space to qualify.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 4, 0);
  DatanodeStorageInfo[] targets;
  targets = chooseTarget(0);
  assertEquals(0, targets.length);
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertEquals(storages[0], targets[0]);
  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertEquals(storages[0], targets[0]);
  assertFalse(isOnSameRack(targets[0], targets[1]));
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameNodeGroup(targets[1], targets[2]));
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertEquals(storages[0], targets[0]);
  assertTrue(isOnSameRack(targets[1], targets[2])
      || isOnSameRack(targets[2], targets[3]));
  assertFalse(isOnSameRack(targets[0], targets[2]));
  verifyNoTwoTargetsOnSameNodeGroup(targets);
  // Reset dataNodes[0]'s xceiver count for subsequent tests.
  updateHeartbeatWithUsage(dataNodes[0],
      2 * HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L,
      HdfsConstants.MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlockQueues

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that adding blocks with different replication counts puts them
 * into different queues
 * @throws Throwable if something goes wrong
 */
@Test
public void testBlockPriorities() throws Throwable {
  UnderReplicatedBlocks queues = new UnderReplicatedBlocks();
  Block block1 = new Block(1);
  Block block2 = new Block(2);
  Block veryUnderReplicatedBlock = new Block(3);
  Block corruptBlock = new Block(4);
  // One replica of three: highest priority.
  assertAdded(queues, block1, 1, 0, 3);
  assertEquals(1, queues.getUnderReplicatedBlockCount());
  assertEquals(1, queues.size());
  assertInLevel(queues, block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
  // Re-adding the same block must be rejected.
  assertFalse(queues.add(block1, 1, 0, 3));
  // Two of three replicas: ordinary under-replication.
  assertAdded(queues, block2, 2, 0, 3);
  assertEquals(2, queues.getUnderReplicatedBlockCount());
  assertEquals(2, queues.size());
  assertInLevel(queues, block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
  // Zero replicas: corrupt queue; not counted as under-replicated.
  assertAdded(queues, corruptBlock, 0, 0, 3);
  assertEquals(3, queues.size());
  assertEquals(2, queues.getUnderReplicatedBlockCount());
  assertEquals(1, queues.getCorruptBlockSize());
  assertInLevel(queues, corruptBlock,
      UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
  // Four of twenty-five expected replicas: very under-replicated.
  assertAdded(queues, veryUnderReplicatedBlock, 4, 0, 25);
  assertInLevel(queues, veryUnderReplicatedBlock,
      UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
}

Class: org.apache.hadoop.hdfs.server.blockmanagement.TestUnderReplicatedBlocks

EqualityVerifier 
/**
 * -setrep to a higher replication factor must succeed (exit code 0) even
 * while the file still has under-replicated blocks.
 */
@Test(timeout=60000)
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION_FACTOR + 1).build();
  try {
    // Create a small file and wait for it to reach full replication.
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    // Drop one replica: invalidate it on a datanode, trigger the pending
    // deletion work via a heartbeat, then remove the node from the map.
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator().next().getDatanodeDescriptor();
    bm.addToInvalidates(b.getLocalBlock(), dn);
    BlockManagerTestUtil.computeAllPendingWork(bm);
    DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
    // NOTE(review): fixed sleep to let the invalidation take effect; a
    // polling wait would be less flaky — confirm before changing.
    Thread.sleep(5000);
    bm.blocksMap.removeNode(b.getLocalBlock(), dn);
    // Increasing replication on the now under-replicated file must still
    // exit with status 0.
    FsShell shell = new FsShell(conf);
    assertEquals(0, shell.run(new String[]{"-setrep", "-w",
        Integer.toString(1 + REPLICATION_FACTOR), FILE_NAME}));
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.common.TestGetUriFromString

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test for an OS dependent absolute paths: both the Windows and the Unix
 * form must convert to a URI with the "file" scheme.
 * @throws IOException
 */
@Test
public void testAbsolutePathAsURI() throws IOException {
  URI u = Util.stringAsURI(ABSOLUTE_PATH_WINDOWS);
  // Messages include a separating space so the failing path reads clearly.
  assertNotNull(
      "Uri should not be null for Windows path " + ABSOLUTE_PATH_WINDOWS, u);
  assertEquals(URI_FILE_SCHEMA, u.getScheme());
  u = Util.stringAsURI(ABSOLUTE_PATH_UNIX);
  assertNotNull(
      "Uri should not be null for Unix path " + ABSOLUTE_PATH_UNIX, u);
  assertEquals(URI_FILE_SCHEMA, u.getScheme());
}

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test for a URI: a correct Unix URI and a correct Windows URI must both
 * round-trip through Util.stringAsURI with the expected scheme and path.
 * @throws IOException
 */
@Test
public void testURI() throws IOException {
  // Unix form.
  LOG.info("Testing correct Unix URI: " + URI_UNIX);
  URI uri = Util.stringAsURI(URI_UNIX);
  LOG.info("Uri: " + uri);
  assertNotNull("Uri should not be null at this point", uri);
  assertEquals(URI_FILE_SCHEMA, uri.getScheme());
  assertEquals(URI_PATH_UNIX, uri.getPath());
  // Windows form; escaped spaces must come back decoded in the path.
  LOG.info("Testing correct windows URI: " + URI_WINDOWS);
  uri = Util.stringAsURI(URI_WINDOWS);
  LOG.info("Uri: " + uri);
  assertNotNull("Uri should not be null at this point", uri);
  assertEquals(URI_FILE_SCHEMA, uri.getScheme());
  assertEquals(URI_PATH_WINDOWS.replace("%20", " "), uri.getPath());
}

Class: org.apache.hadoop.hdfs.server.common.TestJspHelper

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies JspHelper.getUGI() when a delegation token is supplied: the UGI
 * is derived from the token's owner/real-user pair, and request parameters
 * that contradict the token owner are rejected.
 */
@Test
public void testGetUgiFromToken() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  // Build a delegation token owned by `user` with real user `realUser`.
  Text ownerText = new Text(user);
  DelegationTokenIdentifier dtId =
      new DelegationTokenIdentifier(ownerText, ownerText, new Text(realUser));
  Token token = new Token(dtId, new DummySecretManager(0, 0, 0, 0));
  String tokenString = token.encodeToUrlString();
  // Token only, no filter-authenticated user: UGI comes from the token.
  request = getMockRequest(null, null, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME))
      .thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
  Assert.assertEquals(ugi.getShortUserName(), user);
  checkUgiFromToken(ugi);
  // Token plus a matching filter user: token still determines the UGI.
  request = getMockRequest(realUser, null, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME))
      .thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
  Assert.assertEquals(ugi.getShortUserName(), user);
  checkUgiFromToken(ugi);
  // Token plus a DIFFERENT filter user: the token still wins.
  request = getMockRequest("rogue", null, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME))
      .thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
  Assert.assertEquals(ugi.getShortUserName(), user);
  checkUgiFromToken(ugi);
  // Token plus a username parameter matching the token owner: accepted.
  request = getMockRequest(null, user, null);
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME))
      .thenReturn(tokenString);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
  Assert.assertEquals(ugi.getShortUserName(), user);
  checkUgiFromToken(ugi);
  // A parameter contradicting the token owner must be rejected.
  request = getMockRequest(null, null, "rogue");
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME))
      .thenReturn(tokenString);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Usernames not matched: name=rogue != expected=" + user,
        ioe.getMessage());
  }
  // Matching username but contradicting doAs parameter: also rejected.
  request = getMockRequest(null, user, "rogue");
  when(request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME))
      .thenReturn(tokenString);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Usernames not matched: name=rogue != expected=" + user,
        ioe.getMessage());
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies JspHelper.getUGI() for non-proxy requests: a filter-authenticated
 * remote user is required, and any username request parameter must match it.
 */
@Test
public void testGetNonProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  // No remote user and no username parameter: rejected.
  request = getMockRequest(null, null, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter",
        ioe.getMessage());
  }
  // Username parameter without an authenticated remote user: rejected.
  request = getMockRequest(null, realUser, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter",
        ioe.getMessage());
  }
  // Authenticated remote user only: accepted, no real (proxy) user.
  request = getMockRequest(realUser, null, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getShortUserName(), realUser);
  checkUgiFromAuth(ugi);
  // Authenticated user plus a matching username parameter: accepted.
  request = getMockRequest(realUser, realUser, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getShortUserName(), realUser);
  checkUgiFromAuth(ugi);
  // Conflicting username parameter: rejected with a mismatch message.
  request = getMockRequest(realUser, user, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals("Usernames not matched: name=" + user
        + " != expected=" + realUser, ioe.getMessage());
  }
}

EqualityVerifier 
@Test
public void testRemoteAddrWithTrustedProxy() {
  // A trusted proxy resolves to the client address it forwards.
  String resolved = getRemoteAddr(clientAddr, proxyAddr, true);
  assertEquals(clientAddr, resolved);
  // A chained client header still resolves to the originating client.
  resolved = getRemoteAddr(chainedClientAddr, proxyAddr, true);
  assertEquals(clientAddr, resolved);
}

EqualityVerifier 
@Test
public void testRemoteAddrWithUntrustedProxy() {
  // An untrusted proxy's own address is used, not the forwarded client.
  String resolved = getRemoteAddr(clientAddr, proxyAddr, false);
  assertEquals(proxyAddr, resolved);
}

EqualityVerifier 
@Test
public void testRemoteAddr() {
  // With no proxy involved, the client address is returned as-is.
  String resolved = getRemoteAddr(clientAddr, null, false);
  assertEquals(clientAddr, resolved);
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies JspHelper.getUGI() for proxy (doAs) requests: the authenticated
 * remote user must be an authorized superuser, the username parameter must
 * match them, and unauthorized impersonation attempts must fail.
 */
@Test
public void testGetProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  // Authorize realUser to impersonate anyone from anywhere.
  conf.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserGroupConfKey(realUser), "*");
  conf.set(DefaultImpersonationProvider.getTestProvider()
      .getProxySuperuserIpConfKey(realUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  // doAs without any authenticated remote user: rejected.
  request = getMockRequest(null, null, user);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter",
        ioe.getMessage());
  }
  // doAs with only a username parameter (no filter auth): rejected.
  request = getMockRequest(null, realUser, user);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals(
        "Security enabled but user not authenticated by filter",
        ioe.getMessage());
  }
  // Authorized remote user impersonating `user`: accepted.
  request = getMockRequest(realUser, null, user);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
  Assert.assertEquals(ugi.getShortUserName(), user);
  checkUgiFromAuth(ugi);
  // Same, with a matching username parameter: accepted.
  request = getMockRequest(realUser, realUser, user);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNotNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getRealUser().getShortUserName(), realUser);
  Assert.assertEquals(ugi.getShortUserName(), user);
  checkUgiFromAuth(ugi);
  // Username parameter that conflicts with the remote user: rejected.
  request = getMockRequest(realUser, user, user);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals("Usernames not matched: name=" + user
        + " != expected=" + realUser, ioe.getMessage());
  }
  // `user` is not an authorized proxy: impersonation attempts must fail.
  try {
    request = getMockRequest(user, null, realUser);
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad proxy request allowed");
  } catch (AuthorizationException ae) {
    Assert.assertEquals("User: " + user + " is not allowed to impersonate "
        + realUser, ae.getMessage());
  }
  try {
    request = getMockRequest(user, user, realUser);
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad proxy request allowed");
  } catch (AuthorizationException ae) {
    Assert.assertEquals("User: " + user + " is not allowed to impersonate "
        + realUser, ae.getMessage());
  }
}

EqualityVerifier 
@Test
public void testRemoteAddrWithTrustedProxyAndEmptyClient() {
  // With a missing or empty client header, fall back to the proxy address.
  String resolved = getRemoteAddr(null, proxyAddr, true);
  assertEquals(proxyAddr, resolved);
  resolved = getRemoteAddr("", proxyAddr, true);
  assertEquals(proxyAddr, resolved);
}

Class: org.apache.hadoop.hdfs.server.datanode.BlockReportTestBase

InternalCallVerifier EqualityVerifier 
@Test(timeout=300000) public void blockReport_09() throws IOException { final String METHOD_NAME=GenericTestUtils.getMethodName(); Path filePath=new Path("/" + METHOD_NAME + ".dat"); final int DN_N1=DN_N0 + 1; final int bytesChkSum=1024 * 1000; conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum); shutDownCluster(); startUpCluster(); try { writeFile(METHOD_NAME,12 * bytesChkSum,filePath); Block bl=findBlock(filePath,12 * bytesChkSum); BlockChecker bc=new BlockChecker(filePath); bc.start(); waitForTempReplica(bl,DN_N1); DataNode dn=cluster.getDataNodes().get(DN_N1); String poolId=cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId); StorageBlockReport[] reports=getBlockReports(dn,poolId,true,true); sendBlockReports(dnR,poolId,reports); printStats(); assertEquals("Wrong number of PendingReplication blocks",2,cluster.getNamesystem().getPendingReplicationBlocks()); try { bc.join(); } catch ( InterruptedException e) { } } finally { resetConfiguration(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * The test set the configuration parameters for a large block size and * restarts initiated single-node cluster. * Then it writes a file > block_size and closes it. * The second datanode is started in the cluster. * As soon as the replication process is started and at least one TEMPORARY * replica is found test forces BlockReport process and checks * if the TEMPORARY replica isn't reported on it. * Eventually, the configuration is being restored into the original state. * @throws IOException in case of an error */ @Test(timeout=300000) public void blockReport_08() throws IOException { final String METHOD_NAME=GenericTestUtils.getMethodName(); Path filePath=new Path("/" + METHOD_NAME + ".dat"); final int DN_N1=DN_N0 + 1; final int bytesChkSum=1024 * 1000; conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,bytesChkSum); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,6 * bytesChkSum); shutDownCluster(); startUpCluster(); try { ArrayList blocks=writeFile(METHOD_NAME,12 * bytesChkSum,filePath); Block bl=findBlock(filePath,12 * bytesChkSum); BlockChecker bc=new BlockChecker(filePath); bc.start(); waitForTempReplica(bl,DN_N1); DataNode dn=cluster.getDataNodes().get(DN_N1); String poolId=cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId); StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false); sendBlockReports(dnR,poolId,reports); printStats(); assertEquals("Wrong number of PendingReplication blocks",blocks.size(),cluster.getNamesystem().getPendingReplicationBlocks()); try { bc.join(); } catch ( InterruptedException e) { } } finally { resetConfiguration(); } }

InternalCallVerifier EqualityVerifier 
/** * Test creates a file and closes it. * The second datanode is started in the cluster. * As soon as the replication process is completed test runs * Block report and checks that no underreplicated blocks are left * @throws IOException in case of an error */ @Test(timeout=300000) public void blockReport_06() throws Exception { final String METHOD_NAME=GenericTestUtils.getMethodName(); Path filePath=new Path("/" + METHOD_NAME + ".dat"); final int DN_N1=DN_N0 + 1; writeFile(METHOD_NAME,FILE_SIZE,filePath); startDNandWait(filePath,true); DataNode dn=cluster.getDataNodes().get(DN_N1); String poolId=cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId); StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false); sendBlockReports(dnR,poolId,reports); printStats(); assertEquals("Wrong number of PendingReplication Blocks",0,cluster.getNamesystem().getUnderReplicatedBlocks()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test writes a file, verifies and closes it. Then a couple of random blocks
 * is removed and BlockReport is forced; the FSNamesystem is pushed to
 * recalculate required DN's activities such as replications and so on.
 * The number of missing and under-replicated blocks should be the same in
 * case of a single-DN cluster.
 * @throws IOException in case of errors
 */
@Test(timeout=300000) public void blockReport_02() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  LOG.info("Running test " + METHOD_NAME);
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  File dataDir = new File(cluster.getDataDirectory());
  assertTrue(dataDir.isDirectory());
  List blocks2Remove = new ArrayList();
  List removedIndex = new ArrayList();
  List lBlocks = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), FILE_START, FILE_SIZE).getLocatedBlocks();
  // Pick exactly two distinct random block indices to delete.
  while (removedIndex.size() != 2) {
    int newRemoveIndex = rand.nextInt(lBlocks.size());
    if (!removedIndex.contains(newRemoveIndex))
      removedIndex.add(newRemoveIndex);
  }
  for (Integer aRemovedIndex : removedIndex) {
    blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of blocks allocated " + lBlocks.size());
  }
  final DataNode dn0 = cluster.getDataNodes().get(DN_N0);
  // Remove the chosen blocks from the DN: unfinalize them in the dataset and
  // delete the backing files on disk.
  for (ExtendedBlock b : blocks2Remove) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing the block " + b.getBlockName());
    }
    for (File f : findAllFiles(dataDir, new MyFileFilter(b.getBlockName(), true))) {
      DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
      if (!f.delete()) {
        LOG.warn("Couldn't delete " + b.getBlockName());
      } else {
        LOG.debug("Deleted file " + f.toString());
      }
    }
  }
  // Give the DN's directory scanner time to notice the deletions.
  waitTil(DN_RESCAN_EXTRA_WAIT);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn0, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  // Force the NN to recompute replication work before checking counters.
  BlockManagerTestUtil.getComputedDatanodeWork(cluster.getNamesystem().getBlockManager());
  printStats();
  // Single-DN cluster: each deleted block is both missing and under-replicated.
  assertEquals("Wrong number of MissingBlocks is found", blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount());
  assertEquals("Wrong number of UnderReplicatedBlocks is found", blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks());
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * Test write a file, verifies and closes it. Then the length of the blocks * are messed up and BlockReport is forced. * The modification of blocks' length has to be ignored * @throws java.io.IOException on an error */ @Test(timeout=300000) public void blockReport_01() throws IOException { final String METHOD_NAME=GenericTestUtils.getMethodName(); Path filePath=new Path("/" + METHOD_NAME + ".dat"); ArrayList blocks=prepareForRide(filePath,METHOD_NAME,FILE_SIZE); if (LOG.isDebugEnabled()) { LOG.debug("Number of blocks allocated " + blocks.size()); } long[] oldLengths=new long[blocks.size()]; int tempLen; for (int i=0; i < blocks.size(); i++) { Block b=blocks.get(i); if (LOG.isDebugEnabled()) { LOG.debug("Block " + b.getBlockName() + " before\t"+ "Size "+ b.getNumBytes()); } oldLengths[i]=b.getNumBytes(); if (LOG.isDebugEnabled()) { LOG.debug("Setting new length"); } tempLen=rand.nextInt(BLOCK_SIZE); b.set(b.getBlockId(),tempLen,b.getGenerationStamp()); if (LOG.isDebugEnabled()) { LOG.debug("Block " + b.getBlockName() + " after\t "+ "Size "+ b.getNumBytes()); } } DataNode dn=cluster.getDataNodes().get(DN_N0); String poolId=cluster.getNamesystem().getBlockPoolId(); DatanodeRegistration dnR=dn.getDNRegistrationForBP(poolId); StorageBlockReport[] reports=getBlockReports(dn,poolId,false,false); sendBlockReports(dnR,poolId,reports); List blocksAfterReport=DFSTestUtil.getAllBlocks(fs.open(filePath)); if (LOG.isDebugEnabled()) { LOG.debug("After mods: Number of blocks allocated " + blocksAfterReport.size()); } for (int i=0; i < blocksAfterReport.size(); i++) { ExtendedBlock b=blocksAfterReport.get(i).getBlock(); assertEquals("Length of " + i + "th block is incorrect",oldLengths[i],b.getNumBytes()); } }

InternalCallVerifier EqualityVerifier 
/** * Test for the case where one of the DNs in the pipeline is in the * process of doing a block report exactly when the block is closed. * In this case, the block report becomes delayed until after the * block is marked completed on the NN, and hence it reports an RBW * replica for a COMPLETE block. Such a report should not be marked * corrupt. * This is a regression test for HDFS-2791. */ @Test(timeout=300000) public void testOneReplicaRbwReportArrivesAfterBlockCompleted() throws Exception { final CountDownLatch brFinished=new CountDownLatch(1); DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG){ @Override protected Object passThrough( InvocationOnMock invocation) throws Throwable { try { return super.passThrough(invocation); } finally { brFinished.countDown(); } } } ; final String METHOD_NAME=GenericTestUtils.getMethodName(); Path filePath=new Path("/" + METHOD_NAME + ".dat"); REPL_FACTOR=2; startDNandWait(null,false); NameNode nn=cluster.getNameNode(); FSDataOutputStream out=fs.create(filePath,REPL_FACTOR); try { AppendTestUtil.write(out,0,10); out.hflush(); DataNode dn=cluster.getDataNodes().get(0); DatanodeProtocolClientSideTranslatorPB spy=DataNodeTestUtils.spyOnBposToNN(dn,nn); Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.anyObject(),Mockito.anyString(),Mockito.anyObject()); dn.scheduleAllBlockReport(0); delayer.waitForCall(); } finally { IOUtils.closeStream(out); } delayer.proceed(); brFinished.await(); BlockManagerTestUtil.updateState(nn.getNamesystem().getBlockManager()); assertEquals(0,nn.getNamesystem().getCorruptReplicaBlocks()); DFSTestUtil.readFile(fs,filePath); cluster.stopDataNode(1); DFSTestUtil.readFile(fs,filePath); }

Class: org.apache.hadoop.hdfs.server.datanode.TestBPOfferService

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test datanode block pool initialization error handling. * Failure in initializing a block pool should not cause NPE. */ @Test public void testBPInitErrorHandling() throws Exception { final DataNode mockDn=Mockito.mock(DataNode.class); Mockito.doReturn(true).when(mockDn).shouldRun(); Configuration conf=new Configuration(); File dnDataDir=new File(new File(TEST_BUILD_DATA,"testBPInitErrorHandling"),"data"); conf.set(DFS_DATANODE_DATA_DIR_KEY,dnDataDir.toURI().toString()); Mockito.doReturn(conf).when(mockDn).getConf(); Mockito.doReturn(new DNConf(conf)).when(mockDn).getDnConf(); Mockito.doReturn(DataNodeMetrics.create(conf,"fake dn")).when(mockDn).getMetrics(); final AtomicInteger count=new AtomicInteger(); Mockito.doAnswer(new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { if (count.getAndIncrement() == 0) { throw new IOException("faked initBlockPool exception"); } Mockito.doReturn(mockFSDataset).when(mockDn).getFSDataset(); return null; } } ).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class)); BPOfferService bpos=setupBPOSForNNs(mockDn,mockNN1,mockNN2); List actors=bpos.getBPServiceActors(); assertEquals(2,actors.size()); bpos.start(); try { waitForInitialization(bpos); waitForBlockReport(mockNN1,mockNN2); } finally { bpos.stop(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test that the BPOS can register to talk to two different NNs, * sends block reports to both, etc. */ @Test public void testBasicFunctionality() throws Exception { BPOfferService bpos=setupBPOSForNNs(mockNN1,mockNN2); bpos.start(); try { waitForInitialization(bpos); Mockito.verify(mockNN1).registerDatanode(Mockito.any(DatanodeRegistration.class)); Mockito.verify(mockNN2).registerDatanode(Mockito.any(DatanodeRegistration.class)); waitForBlockReport(mockNN1); waitForBlockReport(mockNN2); bpos.notifyNamenodeReceivedBlock(FAKE_BLOCK,"",""); ReceivedDeletedBlockInfo[] ret=waitForBlockReceived(FAKE_BLOCK,mockNN1); assertEquals(1,ret.length); assertEquals(FAKE_BLOCK.getLocalBlock(),ret[0].getBlock()); ret=waitForBlockReceived(FAKE_BLOCK,mockNN2); assertEquals(1,ret.length); assertEquals(FAKE_BLOCK.getLocalBlock(),ret[0].getBlock()); } finally { bpos.stop(); } }

Class: org.apache.hadoop.hdfs.server.datanode.TestBlockPoolManager

EqualityVerifier 
@Test public void testFederationRefresh() throws Exception { Configuration conf=new Configuration(); conf.set(DFSConfigKeys.DFS_NAMESERVICES,"ns1,ns2"); addNN(conf,"ns1","mock1:8020"); addNN(conf,"ns2","mock1:8020"); bpm.refreshNamenodes(conf); assertEquals("create #1\n" + "create #2\n",log.toString()); log.setLength(0); conf.set(DFSConfigKeys.DFS_NAMESERVICES,"ns2"); bpm.refreshNamenodes(conf); assertEquals("stop #1\n" + "refresh #2\n",log.toString()); log.setLength(0); conf.set(DFSConfigKeys.DFS_NAMESERVICES,"ns1,ns2"); bpm.refreshNamenodes(conf); assertEquals("create #3\n" + "refresh #2\n",log.toString()); }

EqualityVerifier 
@Test public void testSimpleSingleNS() throws Exception { Configuration conf=new Configuration(); conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY,"hdfs://mock1:8020"); bpm.refreshNamenodes(conf); assertEquals("create #1\n",log.toString()); }

Class: org.apache.hadoop.hdfs.server.datanode.TestBlockRecovery

TestInitializer EqualityVerifier HybridVerifier 
/** * Starts an instance of DataNode * @throws IOException */ @Before public void startUp() throws IOException, URISyntaxException { tearDownDone=false; conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,DATA_DIR); conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,"0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,"0.0.0.0:0"); conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,"0.0.0.0:0"); conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,0); FileSystem.setDefaultUri(conf,"hdfs://" + NN_ADDR.getHostName() + ":"+ NN_ADDR.getPort()); ArrayList locations=new ArrayList(); File dataDir=new File(DATA_DIR); FileUtil.fullyDelete(dataDir); dataDir.mkdirs(); StorageLocation location=StorageLocation.parse(dataDir.getPath()); locations.add(location); final DatanodeProtocolClientSideTranslatorPB namenode=mock(DatanodeProtocolClientSideTranslatorPB.class); Mockito.doAnswer(new Answer(){ @Override public DatanodeRegistration answer( InvocationOnMock invocation) throws Throwable { return (DatanodeRegistration)invocation.getArguments()[0]; } } ).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class)); when(namenode.versionRequest()).thenReturn(new NamespaceInfo(1,CLUSTER_ID,POOL_ID,1L)); when(namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class),Mockito.any(StorageReport[].class),Mockito.anyLong(),Mockito.anyLong(),Mockito.anyInt(),Mockito.anyInt(),Mockito.anyInt())).thenReturn(new HeartbeatResponse(new DatanodeCommand[0],new NNHAStatusHeartbeat(HAServiceState.ACTIVE,1),null)); dn=new DataNode(conf,locations,null){ @Override DatanodeProtocolClientSideTranslatorPB connectToNN( InetSocketAddress nnAddr) throws IOException { Assert.assertEquals(NN_ADDR,nnAddr); return namenode; } } ; dn.getAllBpOs()[0].triggerHeartbeatForTests(); }

Class: org.apache.hadoop.hdfs.server.datanode.TestBlockReplacement

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end test of the DN-to-DN block replacement protocol.
 * A file with replication 3 is created across three racks; a fourth DN is
 * then started on an existing rack and four replaceBlock scenarios are
 * checked: proxy without the block (fails), destination already holding the
 * block (fails), a valid move (succeeds), and a move with an invalid
 * deletion hint (succeeds).
 * @throws Exception on cluster or RPC errors
 */
@Test public void testBlockReplacement() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
  final String[] NEW_RACKS = {"/RACK2"};
  final short REPLICATION_FACTOR = (short) 3;
  final int DEFAULT_BLOCK_SIZE = 1024;
  final Random r = new Random();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 500);
  cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).racks(INITIAL_RACKS).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path fileName = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, fileName, DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, r.nextLong());
    DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, CONF);
    List<LocatedBlock> locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
    assertEquals(1, locatedBlocks.size());
    LocatedBlock block = locatedBlocks.get(0);
    DatanodeInfo[] oldNodes = block.getLocations();
    // Fix: JUnit order is assertEquals(expected, actual).
    assertEquals(3, oldNodes.length);
    ExtendedBlock b = block.getBlock();
    // Bring up a 4th datanode, on the same rack as one of the originals.
    cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
    cluster.waitActive();
    DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);
    // Identify the newly added node: the one not among the original replicas.
    DatanodeInfo newNode = null;
    for (DatanodeInfo node : datanodes) {
      boolean isNewNode = true; // fix: primitive boolean, not boxed Boolean
      for (DatanodeInfo oldNode : oldNodes) {
        if (node.equals(oldNode)) {
          isNewNode = false;
          break;
        }
      }
      if (isNewNode) {
        newNode = node;
        break;
      }
    }
    assertTrue(newNode != null);
    // source shares the new node's rack; the two remaining DNs act as proxies.
    DatanodeInfo source = null;
    ArrayList<DatanodeInfo> proxies = new ArrayList<DatanodeInfo>(2);
    for (DatanodeInfo node : datanodes) {
      if (node != newNode) {
        if (node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
          source = node;
        } else {
          proxies.add(node);
        }
      }
    }
    assertTrue(source != null && proxies.size() == 2);
    LOG.info("Testcase 1: Proxy " + newNode + " does not contain the block " + b);
    assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
    LOG.info("Testcase 2: Destination " + proxies.get(1) + " contains the block " + b);
    assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
    LOG.info("Testcase 3: Source=" + source + " Proxy=" + proxies.get(0) + " Destination=" + newNode);
    assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
    checkBlocks(new DatanodeInfo[]{newNode, proxies.get(0), proxies.get(1)}, fileName.toString(), DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
    LOG.info("Testcase 4: invalid del hint " + proxies.get(0));
    assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
    checkBlocks(proxies.toArray(new DatanodeInfo[proxies.size()]), fileName.toString(), DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataDirs

APIUtilityVerifier EqualityVerifier ConditionMatcher HybridVerifier 
@Test(timeout=30000) public void testDataDirValidation() throws Throwable { DataNodeDiskChecker diskChecker=mock(DataNodeDiskChecker.class); doThrow(new IOException()).doThrow(new IOException()).doNothing().when(diskChecker).checkDir(any(LocalFileSystem.class),any(Path.class)); LocalFileSystem fs=mock(LocalFileSystem.class); AbstractList locations=new ArrayList(); locations.add(StorageLocation.parse("file:/p1/")); locations.add(StorageLocation.parse("file:/p2/")); locations.add(StorageLocation.parse("file:/p3/")); List checkedLocations=DataNode.checkStorageLocations(locations,fs,diskChecker); assertEquals("number of valid data dirs",1,checkedLocations.size()); String validDir=checkedLocations.iterator().next().getFile().getPath(); assertThat("p3 should be valid",new File("/p3/").getPath(),is(validDir)); }

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeMXBean

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testDataNodeMXBean() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { List datanodes=cluster.getDataNodes(); Assert.assertEquals(datanodes.size(),1); DataNode datanode=datanodes.get(0); MBeanServer mbs=ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName=new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo"); String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId"); Assert.assertEquals(datanode.getClusterId(),clusterId); String version=(String)mbs.getAttribute(mxbeanName,"Version"); Assert.assertEquals(datanode.getVersion(),version); String rpcPort=(String)mbs.getAttribute(mxbeanName,"RpcPort"); Assert.assertEquals(datanode.getRpcPort(),rpcPort); String httpPort=(String)mbs.getAttribute(mxbeanName,"HttpPort"); Assert.assertEquals(datanode.getHttpPort(),httpPort); String namenodeAddresses=(String)mbs.getAttribute(mxbeanName,"NamenodeAddresses"); Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses); String volumeInfo=(String)mbs.getAttribute(mxbeanName,"VolumeInfo"); Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),replaceDigits(volumeInfo)); int xceiverCount=(Integer)mbs.getAttribute(mxbeanName,"XceiverCount"); Assert.assertEquals(datanode.getXceiverCount(),xceiverCount); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeMetrics

InternalCallVerifier EqualityVerifier 
@Test public void testDataNodeMetrics() throws Exception { Configuration conf=new HdfsConfiguration(); SimulatedFSDataset.setFactory(conf); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { FileSystem fs=cluster.getFileSystem(); final long LONG_FILE_LEN=Integer.MAX_VALUE + 1L; DFSTestUtil.createFile(fs,new Path("/tmp.txt"),LONG_FILE_LEN,(short)1,1L); List datanodes=cluster.getDataNodes(); assertEquals(datanodes.size(),1); DataNode datanode=datanodes.get(0); MetricsRecordBuilder rb=getMetrics(datanode.getMetrics().name()); assertCounter("BytesWritten",LONG_FILE_LEN,rb); } finally { if (cluster != null) { cluster.shutdown(); } } }

InternalCallVerifier EqualityVerifier 
@Test public void testSendDataPacketMetrics() throws Exception { Configuration conf=new HdfsConfiguration(); final int interval=1; conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,"" + interval); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); try { FileSystem fs=cluster.getFileSystem(); Path tmpfile=new Path("/tmp.txt"); DFSTestUtil.createFile(fs,tmpfile,(long)1,(short)1,1L); DFSTestUtil.readFile(fs,tmpfile); List datanodes=cluster.getDataNodes(); assertEquals(datanodes.size(),1); DataNode datanode=datanodes.get(0); MetricsRecordBuilder rb=getMetrics(datanode.getMetrics().name()); assertCounter("SendDataPacketTransferNanosNumOps",(long)2,rb); assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps",(long)2,rb); Thread.sleep((interval + 1) * 1000); String sec=interval + "s"; assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec,rb); assertQuantileGauges("SendDataPacketTransferNanos" + sec,rb); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeMultipleRegistrations

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDNWithInvalidStorageWithHA() throws Exception { MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0").setClusterId("cluster-1")).addNN(new MiniDFSNNTopology.NNConf("nn1").setClusterId("cluster-1"))); top.setFederation(true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build(); try { cluster.startDataNodes(conf,1,true,null,null); Thread.sleep(10000); DataNode dn=cluster.getDataNodes().get(0); assertTrue("Datanode should be running",dn.isDatanodeUp()); assertEquals("BPOfferService should be running",1,dn.getAllBpOs().length); DataNodeProperties dnProp=cluster.stopDataNode(0); cluster.getNameNode(0).stop(); cluster.getNameNode(1).stop(); Configuration nn1=cluster.getConfiguration(0); Configuration nn2=cluster.getConfiguration(1); StartupOption.FORMAT.setClusterId("cluster-2"); DFSTestUtil.formatNameNode(nn1); MiniDFSCluster.copyNameDirs(FSNamesystem.getNamespaceDirs(nn1),FSNamesystem.getNamespaceDirs(nn2),nn2); cluster.restartNameNode(0,false); cluster.restartNameNode(1,false); cluster.restartDataNode(dnProp); Thread.sleep(10000); dn=cluster.getDataNodes().get(0); assertFalse("Datanode should have shutdown as only service failed",dn.isDatanodeUp()); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/** * starts single nn and single dn and verifies registration and handshake * @throws IOException */ @Test public void testFedSingleNN() throws IOException { MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nameNodePort(9927).build(); try { NameNode nn1=cluster.getNameNode(); assertNotNull("cannot create nn1",nn1); String bpid1=FSImageTestUtil.getFSImage(nn1).getBlockPoolID(); String cid1=FSImageTestUtil.getFSImage(nn1).getClusterID(); int lv1=FSImageTestUtil.getFSImage(nn1).getLayoutVersion(); LOG.info("nn1: lv=" + lv1 + ";cid="+ cid1+ ";bpid="+ bpid1+ ";uri="+ nn1.getNameNodeAddress()); DataNode dn=cluster.getDataNodes().get(0); final Map volInfos=dn.data.getVolumeInfoMap(); Assert.assertTrue("No volumes in the fsdataset",volInfos.size() > 0); int i=0; for ( Map.Entry e : volInfos.entrySet()) { LOG.info("vol " + i++ + ") "+ e.getKey()+ ": "+ e.getValue()); } assertEquals("number of volumes is wrong",2,volInfos.size()); for ( BPOfferService bpos : dn.getAllBpOs()) { LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid="+ bpos.bpRegistration.getDatanodeUuid()+ "; nna="+ getNNSocketAddress(bpos)); } BPOfferService bpos1=dn.getAllBpOs()[0]; bpos1.triggerBlockReportForTests(); assertEquals("wrong nn address",getNNSocketAddress(bpos1),nn1.getNameNodeAddress()); assertEquals("wrong bpid",bpos1.getBlockPoolId(),bpid1); assertEquals("wrong cid",dn.getClusterId(),cid1); cluster.shutdown(); assertEquals(0,dn.getAllBpOs().length); cluster=null; } finally { if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testMiniDFSClusterWithMultipleNN() throws IOException { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build(); try { cluster.waitActive(); Assert.assertEquals("(1)Should be 2 namenodes",2,cluster.getNumNameNodes()); cluster.addNameNode(conf,0); Assert.assertEquals("(1)Should be 3 namenodes",3,cluster.getNumNameNodes()); } catch ( IOException ioe) { Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1)).build(); try { Assert.assertNotNull(cluster); cluster.waitActive(); Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes()); cluster.addNameNode(conf,0); Assert.assertEquals("(2)Should be 2 namenodes",2,cluster.getNumNameNodes()); } catch ( IOException ioe) { Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe)); } finally { cluster.shutdown(); } conf=new HdfsConfiguration(); cluster=new MiniDFSCluster.Builder(conf).build(); try { cluster.waitActive(); Assert.assertNotNull(cluster); Assert.assertEquals("(2)Should be 1 namenodes",1,cluster.getNumNameNodes()); cluster.addNameNode(conf,9929); Assert.fail("shouldn't be able to add another NN to non federated cluster"); } catch ( IOException e) { Assert.assertTrue(e.getMessage().startsWith("cannot add namenode")); Assert.assertEquals("(3)Should be 1 namenodes",1,cluster.getNumNameNodes()); } finally { cluster.shutdown(); } }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=20000) public void testClusterIdMismatchAtStartupWithHA() throws Exception { MiniDFSNNTopology top=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn0")).addNN(new MiniDFSNNTopology.NNConf("nn1"))).addNameservice(new MiniDFSNNTopology.NSConf("ns2").addNN(new MiniDFSNNTopology.NNConf("nn2").setClusterId("bad-cid")).addNN(new MiniDFSNNTopology.NNConf("nn3").setClusterId("bad-cid"))); top.setFederation(true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(top).numDataNodes(0).build(); try { cluster.startDataNodes(conf,1,true,null,null); Thread.sleep(10000); DataNode dn=cluster.getDataNodes().get(0); assertTrue("Datanode should be running",dn.isDatanodeUp()); assertEquals("Only one BPOfferService should be running",1,dn.getAllBpOs().length); } finally { cluster.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Starts multiple NNs and a single DN and verifies per-block-pool
 * registrations and handshakes: each BPOfferService must bind to the right
 * NN address, block pool id and namespace id, while both pools share the
 * same cluster id.
 * @throws IOException
 */
@Test public void test2NNRegistration() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);
    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
    // Federation: separate namespaces within the same cluster.
    assertNotSame("namespace ids should be different", ns1, ns2);
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress());
    LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri=" + nn2.getNameNodeAddress());
    DataNode dn = cluster.getDataNodes().get(0);
    // Sanity check the DN's volumes (MiniDFSCluster creates 2 per DN).
    final Map volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    assertEquals("number of volumes is wrong", 2, volInfos.size());
    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("BP: " + bpos);
    }
    // The DN's BPOS array order is not guaranteed; swap so bpos1 <-> nn1.
    BPOfferService bpos1 = dn.getAllBpOs()[0];
    BPOfferService bpos2 = dn.getAllBpOs()[1];
    if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
      BPOfferService tmp = bpos1;
      bpos1 = bpos2;
      bpos2 = tmp;
    }
    assertEquals("wrong nn address", getNNSocketAddress(bpos1), nn1.getNameNodeAddress());
    assertEquals("wrong nn address", getNNSocketAddress(bpos2), nn2.getNameNodeAddress());
    assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
    assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
    // Both pools belong to one cluster, so the cluster ids must agree.
    assertEquals("wrong cid", dn.getClusterId(), cid1);
    assertEquals("cid should be same", cid2, cid1);
    assertEquals("namespace should be same", bpos1.bpNSInfo.namespaceID, ns1);
    assertEquals("namespace should be same", bpos2.bpNSInfo.namespaceID, ns2);
  } finally {
    cluster.shutdown();
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testClusterIdMismatch() throws Exception { MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build(); try { cluster.waitActive(); DataNode dn=cluster.getDataNodes().get(0); BPOfferService[] bposs=dn.getAllBpOs(); LOG.info("dn bpos len (should be 2):" + bposs.length); Assert.assertEquals("should've registered with two namenodes",bposs.length,2); cluster.addNameNode(conf,9938); Thread.sleep(500); bposs=dn.getAllBpOs(); LOG.info("dn bpos len (should be 3):" + bposs.length); Assert.assertEquals("should've registered with three namenodes",bposs.length,3); StartupOption.FORMAT.setClusterId("DifferentCID"); cluster.addNameNode(conf,9948); NameNode nn4=cluster.getNameNode(3); assertNotNull("cannot create nn4",nn4); Thread.sleep(500); bposs=dn.getAllBpOs(); LOG.info("dn bpos len (still should be 3):" + bposs.length); Assert.assertEquals("should've registered with three namenodes",3,bposs.length); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureReporting

InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Test that individual volume failures do not cause DNs to fail, that
 * all volumes failed on a single datanode do cause it to fail, and
 * that the capacities and liveliness is adjusted correctly in the NN.
 */
@Test
public void testSuccessiveVolumeFailures() throws Exception {
  // Volume failure is simulated by dropping the execute bit, which does not
  // work on Windows.
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
  // Bring up two extra DNs (three total) and let heartbeats settle.
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();
  Thread.sleep(WAIT_FOR_HEARTBEATS);
  final DatanodeManager dm =
      cluster.getNamesystem().getBlockManager().getDatanodeManager();
  final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
  // Each DN i owns two volume dirs: data<2*i+1> and data<2*i+2>.
  File dn1Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
  File dn2Vol1 = new File(dataDir, "data" + (2 * 1 + 1));
  File dn3Vol1 = new File(dataDir, "data" + (2 * 2 + 1));
  File dn3Vol2 = new File(dataDir, "data" + (2 * 2 + 2));
  // Fail one volume each on DN1 and DN2.
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
  // Writing a fully-replicated file must still succeed.
  Path file1 = new Path("/test1");
  DFSTestUtil.createFile(fs, file1, 1024, (short) 3, 1L);
  DFSTestUtil.waitReplication(fs, file1, (short) 3);
  // All DNs stay up despite the single-volume failures.
  ArrayList dns = cluster.getDataNodes();
  assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
  assertTrue("DN2 should be up", dns.get(1).isDatanodeUp());
  assertTrue("DN3 should be up", dns.get(2).isDatanodeUp());
  // DN1 and DN2 each report one volume failure; DN3 none yet.
  assertCounter("VolumeFailures", 1L, getMetrics(dns.get(0).getMetrics().name()));
  assertCounter("VolumeFailures", 1L, getMetrics(dns.get(1).getMetrics().name()));
  assertCounter("VolumeFailures", 0L, getMetrics(dns.get(2).getMetrics().name()));
  // Sanity: the status wait below must outlast the DN death timeout.
  assert (WAIT_FOR_HEARTBEATS * 10) > WAIT_FOR_DEATH;
  // NN view: 3 live, 0 dead, 2 failed volumes, capacity reduced by one volume.
  DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
      origCapacity - (1 * dnCapacity), WAIT_FOR_HEARTBEATS);
  // Now fail one of DN3's volumes as well.
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
  Path file2 = new Path("/test2");
  DFSTestUtil.createFile(fs, file2, 1024, (short) 3, 1L);
  DFSTestUtil.waitReplication(fs, file2, (short) 3);
  assertTrue("DN3 should still be up", dns.get(2).isDatanodeUp());
  assertCounter("VolumeFailures", 1L, getMetrics(dns.get(2).getMetrics().name()));
  // Fetch twice: the first fetch primes the lists, the second reads the
  // settled state after clearing.
  ArrayList live = new ArrayList();
  ArrayList dead = new ArrayList();
  dm.fetchDatanodes(live, dead, false);
  live.clear();
  dead.clear();
  dm.fetchDatanodes(live, dead, false);
  assertEquals("DN3 should have 1 failed volume", 1,
      live.get(2).getVolumeFailures());
  // Capacity shrinks by three volumes total now.
  dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
  DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 3,
      origCapacity - (3 * dnCapacity), WAIT_FOR_HEARTBEATS);
  // Fail DN3's last volume: the whole DN must die; replication of file3 can
  // only reach 2 of the requested 3.
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
  Path file3 = new Path("/test3");
  DFSTestUtil.createFile(fs, file3, 1024, (short) 3, 1L);
  DFSTestUtil.waitReplication(fs, file3, (short) 2);
  DFSTestUtil.waitForDatanodeDeath(dns.get(2));
  assertCounter("VolumeFailures", 2L, getMetrics(dns.get(2).getMetrics().name()));
  // NN view: 2 live, 1 dead, 2 failed volumes on live nodes.
  DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 2,
      origCapacity - (4 * dnCapacity), WAIT_FOR_HEARTBEATS);
  // Restore all volumes, restart DNs, and verify full recovery.
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
  assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
  cluster.restartDataNodes();
  cluster.waitActive();
  Path file4 = new Path("/test4");
  DFSTestUtil.createFile(fs, file4, 1024, (short) 3, 1L);
  DFSTestUtil.waitReplication(fs, file4, (short) 3);
  // Back to original capacity with no failed volumes or dead nodes.
  DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 0, origCapacity,
      WAIT_FOR_HEARTBEATS);
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDataNodeVolumeFailureToleration

InternalCallVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/** * Test that a volume that is considered failed on startup is seen as * a failed volume by the NN. */ @Test public void testFailedVolumeOnStartupIsCounted() throws Exception { assumeTrue(!System.getProperty("os.name").startsWith("Windows")); final DatanodeManager dm=cluster.getNamesystem().getBlockManager().getDatanodeManager(); long origCapacity=DFSTestUtil.getLiveDatanodeCapacity(dm); File dir=new File(cluster.getInstanceStorageDir(0,0),"current"); try { prepareDirToFail(dir); restartDatanodes(1,false); assertEquals(true,cluster.getDataNodes().get(0).isBPServiceAlive(cluster.getNamesystem().getBlockPoolId())); DFSTestUtil.waitForDatanodeStatus(dm,1,0,1,origCapacity / 2,WAIT_FOR_HEARTBEATS); } finally { FileUtil.chmod(dir.toString(),"755"); } }

Class: org.apache.hadoop.hdfs.server.datanode.TestDataStorage

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testRecoverTransitionReadFailure() throws IOException { final int numLocations=3; List locations=createStorageLocations(numLocations,true); try { storage.recoverTransitionRead(mockDN,nsInfo,locations,START_OPT); fail("An IOException should throw: all StorageLocations are NON_EXISTENT"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e); } assertEquals(0,storage.getNumStorageDirs()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * This test enforces the behavior that if there is an exception from * doTransition() during DN starts up, the storage directories that have * already been processed are still visible, i.e., in * DataStorage.storageDirs(). */ @Test public void testRecoverTransitionReadDoTransitionFailure() throws IOException { final int numLocations=3; List locations=createStorageLocations(numLocations); String bpid=nsInfo.getBlockPoolID(); storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT); storage.unlockAll(); storage=new DataStorage(); nsInfo.clusterID="cluster1"; try { storage.recoverTransitionRead(mockDN,bpid,nsInfo,locations,START_OPT); fail("Expect to throw an exception from doTransition()"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Incompatible clusterIDs",e); } assertEquals(numLocations,storage.getNumStorageDirs()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testAddStorageDirectories() throws IOException, URISyntaxException { final int numLocations=3; final int numNamespace=3; List locations=createStorageLocations(numLocations); List namespaceInfos=createNamespaceInfos(numNamespace); for ( NamespaceInfo ni : namespaceInfos) { storage.addStorageLocations(mockDN,ni,locations,START_OPT); for ( StorageLocation sl : locations) { checkDir(sl.getFile()); checkDir(sl.getFile(),ni.getBlockPoolID()); } } assertEquals(numLocations,storage.getNumStorageDirs()); locations=createStorageLocations(numLocations); try { storage.addStorageLocations(mockDN,namespaceInfos.get(0),locations,START_OPT); fail("Expected to throw IOException: adding active directories."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("All specified directories are not accessible or do not exist.",e); } assertEquals(numLocations,storage.getNumStorageDirs()); locations=createStorageLocations(6); storage.addStorageLocations(mockDN,nsInfo,locations,START_OPT); assertEquals(6,storage.getNumStorageDirs()); }

Class: org.apache.hadoop.hdfs.server.datanode.TestDatanodeRegister

UtilityVerifier EqualityVerifier HybridVerifier 
@Test public void testDifferentLayoutVersions() throws Exception { assertEquals(HdfsConstants.NAMENODE_LAYOUT_VERSION,actor.retrieveNamespaceInfo().getLayoutVersion()); doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION * 1000).when(fakeNsInfo).getLayoutVersion(); try { actor.retrieveNamespaceInfo(); } catch ( IOException e) { fail("Should not fail to retrieve NS info from DN with different layout version"); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testSoftwareVersionDifferences() throws Exception { assertEquals(VersionInfo.getVersion(),actor.retrieveNamespaceInfo().getSoftwareVersion()); doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion(); doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion(); assertEquals("4.0.0",actor.retrieveNamespaceInfo().getSoftwareVersion()); doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion(); doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion(); try { actor.retrieveNamespaceInfo(); fail("Should have thrown an exception for NN with too-low version"); } catch ( IncorrectVersionException ive) { GenericTestUtils.assertExceptionContains("The reported NameNode version is too low",ive); LOG.info("Got expected exception",ive); } }

Class: org.apache.hadoop.hdfs.server.datanode.TestDeleteBlockPool

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testDfsAdminDeleteBlockPool() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=null; try { conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1,namesServerId2"); cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(1).build(); cluster.waitActive(); FileSystem fs1=cluster.getFileSystem(0); FileSystem fs2=cluster.getFileSystem(1); DFSTestUtil.createFile(fs1,new Path("/alpha"),1024,(short)1,54); DFSTestUtil.createFile(fs2,new Path("/beta"),1024,(short)1,54); DataNode dn1=cluster.getDataNodes().get(0); String bpid1=cluster.getNamesystem(0).getBlockPoolId(); String bpid2=cluster.getNamesystem(1).getBlockPoolId(); File dn1StorageDir1=cluster.getInstanceStorageDir(0,0); File dn1StorageDir2=cluster.getInstanceStorageDir(0,1); Configuration nn1Conf=cluster.getConfiguration(0); nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES,"namesServerId1"); dn1.refreshNamenodes(nn1Conf); assertEquals(1,dn1.getAllBpOs().length); DFSAdmin admin=new DFSAdmin(nn1Conf); String dn1Address=dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort(); String[] args={"-deleteBlockPool",dn1Address,bpid2}; int ret=admin.run(args); assertFalse(0 == ret); verifyBlockPoolDirectories(true,dn1StorageDir1,bpid2); verifyBlockPoolDirectories(true,dn1StorageDir2,bpid2); String[] forceArgs={"-deleteBlockPool",dn1Address,bpid2,"force"}; ret=admin.run(forceArgs); assertEquals(0,ret); verifyBlockPoolDirectories(false,dn1StorageDir1,bpid2); verifyBlockPoolDirectories(false,dn1StorageDir2,bpid2); verifyBlockPoolDirectories(true,dn1StorageDir1,bpid1); verifyBlockPoolDirectories(true,dn1StorageDir2,bpid1); } finally { if (cluster != null) { cluster.shutdown(); } } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises DataNode.deleteBlockPool(): a running pool can never be
 * deleted; an inactive pool with block files needs force=true; after
 * deletion the other pool's directories are untouched and the cluster can
 * still replicate new data.
 */
@Test
public void testDeleteBlockPool() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    // Federated cluster: two nameservices, two DNs serving both pools.
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1,namesServerId2");
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .numDataNodes(2).build();
    cluster.waitActive();
    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);
    DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 2, 54);
    DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 2, 54);
    DataNode dn1 = cluster.getDataNodes().get(0);
    DataNode dn2 = cluster.getDataNodes().get(1);
    String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
    String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
    File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
    File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
    File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
    File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
    // Pool bpid1 is still running on dn1: deletion must be refused.
    try {
      dn1.deleteBlockPool(bpid1, true);
      fail("Must not delete a running block pool");
    } catch (IOException expected) {
    }
    // Refresh dn1 to serve only nameservice 2, making bpid1 inactive on it.
    // NOTE(review): variable is named nn1Conf but holds configuration index 1
    // (namesServerId2) — confirm the naming is intentional.
    Configuration nn1Conf = cluster.getConfiguration(1);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId2");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1, dn1.getAllBpOs().length);
    // Inactive pool but block files present and force=false: refused.
    try {
      dn1.deleteBlockPool(bpid1, false);
      fail("Must not delete if any block files exist unless "
          + "force is true");
    } catch (IOException expected) {
    }
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
    // force=true removes bpid1's directories on dn1.
    dn1.deleteBlockPool(bpid1, true);
    verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);
    // Delete the file and wait until dn2's finalized dirs drain so that a
    // non-forced delete can succeed there.
    fs1.delete(new Path("/alpha"), true);
    File finalDir1 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid1);
    File finalDir2 = MiniDFSCluster.getFinalizedDir(dn2StorageDir1, bpid2);
    while ((!DatanodeUtil.dirNoFilesRecursive(finalDir1))
        || (!DatanodeUtil.dirNoFilesRecursive(finalDir2))) {
      try {
        Thread.sleep(3000);
      } catch (Exception ignored) {
      }
    }
    // NN0 down does not make bpid1 deletable while dn2 still runs it.
    cluster.shutdownNameNode(0);
    try {
      dn2.deleteBlockPool(bpid1, true);
      fail("Must not delete a running block pool");
    } catch (IOException expected) {
    }
    // After refreshing dn2 to nameservice 2 only, a non-forced delete of the
    // now-empty, inactive bpid1 succeeds.
    dn2.refreshNamenodes(nn1Conf);
    assertEquals(1, dn2.getAllBpOs().length);
    verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);
    dn2.deleteBlockPool(bpid1, false);
    verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
    verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);
    // bpid2's directories survive on both DNs.
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
    verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
    // The surviving pool remains fully functional: replication still works.
    Path gammaFile = new Path("/gamma");
    DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
    fs2.setReplication(gammaFile, (short) 2);
    DFSTestUtil.waitReplication(fs2, gammaFile, (short) 2);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.TestDiskError

InternalCallVerifier EqualityVerifier 
/** * Test that when there is a failure replicating a block the temporary * and meta files are cleaned up and subsequent replication succeeds. */ @Test public void testReplicationError() throws Exception { final Path fileName=new Path("/test.txt"); final int fileLen=1; DFSTestUtil.createFile(fs,fileName,1,(short)1,1L); DFSTestUtil.waitReplication(fs,fileName,(short)1); LocatedBlocks blocks=NameNodeAdapter.getBlockLocations(cluster.getNameNode(),fileName.toString(),0,(long)fileLen); assertEquals("Should only find 1 block",blocks.locatedBlockCount(),1); LocatedBlock block=blocks.get(0); cluster.startDataNodes(conf,1,true,null,null); cluster.waitActive(); final int sndNode=1; DataNode datanode=cluster.getDataNodes().get(sndNode); InetSocketAddress target=datanode.getXferAddress(); Socket s=new Socket(target.getAddress(),target.getPort()); DataOutputStream out=new DataOutputStream(s.getOutputStream()); DataChecksum checksum=DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,512); new Sender(out).writeBlock(block.getBlock(),StorageType.DEFAULT,BlockTokenSecretManager.DUMMY_TOKEN,"",new DatanodeInfo[0],new StorageType[0],null,BlockConstructionStage.PIPELINE_SETUP_CREATE,1,0L,0L,0L,checksum,CachingStrategy.newDefaultStrategy()); out.flush(); out.close(); String bpid=cluster.getNamesystem().getBlockPoolId(); File storageDir=cluster.getInstanceStorageDir(sndNode,0); File dir1=MiniDFSCluster.getRbwDir(storageDir,bpid); storageDir=cluster.getInstanceStorageDir(sndNode,1); File dir2=MiniDFSCluster.getRbwDir(storageDir,bpid); while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) { Thread.sleep(100); } fs.setReplication(fileName,(short)2); DFSTestUtil.waitReplication(fs,fileName,(short)1); fs.delete(fileName,false); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Check that the permissions of the local DN directories are as expected. */ @Test public void testLocalDirs() throws Exception { Configuration conf=new Configuration(); final String permStr=conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY); FsPermission expected=new FsPermission(permStr); FileSystem localFS=FileSystem.getLocal(conf); for ( DataNode dn : cluster.getDataNodes()) { for ( FsVolumeSpi v : dn.getFSDataset().getVolumes()) { String dir=v.getBasePath(); Path dataDir=new Path(dir); FsPermission actual=localFS.getFileStatus(dataDir).getPermission(); assertEquals("Permission for dir: " + dataDir + ", is "+ actual+ ", while expected is "+ expected,expected,actual); } } }

Class: org.apache.hadoop.hdfs.server.datanode.TestFsDatasetCache

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=600000) public void testUncachingBlocksBeforeCachingFinishes() throws Exception { LOG.info("beginning testUncachingBlocksBeforeCachingFinishes"); final int NUM_BLOCKS=5; DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd); final Path testFile=new Path("/testCacheBlock"); final long testFileLen=BLOCK_SIZE * NUM_BLOCKS; DFSTestUtil.createFile(fs,testFile,testFileLen,(short)1,0xABBAl); HdfsBlockLocation[] locs=(HdfsBlockLocation[])fs.getFileBlockLocations(testFile,0,testFileLen); assertEquals("Unexpected number of blocks",NUM_BLOCKS,locs.length); final long[] blockSizes=getBlockSizes(locs); final long cacheCapacity=fsd.getCacheCapacity(); long cacheUsed=fsd.getCacheUsed(); long current=0; assertEquals("Unexpected cache capacity",CACHE_CAPACITY,cacheCapacity); assertEquals("Unexpected amount of cache used",current,cacheUsed); NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator(){ @Override public void mlock( String identifier, ByteBuffer mmap, long length) throws IOException { LOG.info("An mlock operation is starting on " + identifier); try { Thread.sleep(3000); } catch ( InterruptedException e) { Assert.fail(); } } } ); for (int i=0; i < NUM_BLOCKS; i++) { setHeartbeatResponse(cacheBlock(locs[i])); current=DFSTestUtil.verifyExpectedCacheUsage(current + blockSizes[i],i + 1,fsd); } setHeartbeatResponse(new DatanodeCommand[]{getResponse(locs,DatanodeProtocol.DNA_UNCACHE)}); current=DFSTestUtil.verifyExpectedCacheUsage(0,0,fsd); LOG.info("finishing testUncachingBlocksBeforeCachingFinishes"); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Fill the entire cache with one big file, verify a later small-file
 * directive cannot be satisfied, then remove the big directive and verify
 * the small file gets cached.
 */
@Test(timeout = 60000)
public void testReCacheAfterUncache() throws Exception {
  final int TOTAL_BLOCKS_PER_CACHE = Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  // Capacity must be an exact multiple of the block size for the math below.
  Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
  // One small file plus one file that exactly fills the cache.
  final Path SMALL_FILE = new Path("/smallFile");
  DFSTestUtil.createFile(fs, SMALL_FILE, BLOCK_SIZE, (short) 1, 0xcafe);
  final Path BIG_FILE = new Path("/bigFile");
  DFSTestUtil.createFile(fs, BIG_FILE,
      TOTAL_BLOCKS_PER_CACHE * BLOCK_SIZE, (short) 1, 0xbeef);
  final DistributedFileSystem dfs = cluster.getFileSystem();
  dfs.addCachePool(new CachePoolInfo("pool"));
  // Cache the big file and wait until every one of its blocks is cached.
  final long bigCacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
          .setPath(BIG_FILE).setReplication((short) 1).build());
  GenericTestUtils.waitFor(new Supplier() {
    @Override
    public Boolean get() {
      MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
      long blocksCached =
          MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
      if (blocksCached != TOTAL_BLOCKS_PER_CACHE) {
        LOG.info("waiting for " + TOTAL_BLOCKS_PER_CACHE + " to "
            + "be cached. Right now only " + blocksCached
            + " blocks are cached.");
        return false;
      }
      LOG.info(TOTAL_BLOCKS_PER_CACHE + " blocks are now cached.");
      return true;
    }
  }, 1000, 30000);
  // The cache is full: a directive for the small file cannot make progress.
  final long shortCacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
          .setPath(SMALL_FILE).setReplication((short) 1).build());
  Thread.sleep(10000); // give the directive time to (not) be satisfied
  MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
  Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE,
      MetricsAsserts.getLongCounter("BlocksCached", dnMetrics));
  // Freeing the big directive lets the small file's directive complete.
  dfs.removeCacheDirective(bigCacheDirectiveId);
  GenericTestUtils.waitFor(new Supplier() {
    @Override
    public Boolean get() {
      RemoteIterator iter;
      try {
        iter = dfs.listCacheDirectives(
            new CacheDirectiveInfo.Builder().build());
        CacheDirectiveEntry entry;
        // Scan directives for the small-file directive's entry.
        do {
          entry = iter.next();
        } while (entry.getInfo().getId() != shortCacheDirectiveId);
        if (entry.getStats().getFilesCached() != 1) {
          LOG.info("waiting for directive " + shortCacheDirectiveId
              + " to be cached. stats = " + entry.getStats());
          return false;
        }
        LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
      } catch (IOException e) {
        Assert.fail("unexpected exception" + e.toString());
      }
      return true;
    }
  }, 1000, 30000);
  dfs.removeCacheDirective(shortCacheDirectiveId);
}

Class: org.apache.hadoop.hdfs.server.datanode.TestRefreshNamenodes

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRefreshNamenodes() throws IOException { Configuration conf=new Configuration(); MiniDFSCluster cluster=null; try { MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new NSConf("ns1").addNN(new NNConf(null).setIpcPort(nnPort1))).setFederation(true); cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).build(); DataNode dn=cluster.getDataNodes().get(0); assertEquals(1,dn.getAllBpOs().length); cluster.addNameNode(conf,nnPort2); assertEquals(2,dn.getAllBpOs().length); cluster.addNameNode(conf,nnPort3); assertEquals(3,dn.getAllBpOs().length); cluster.addNameNode(conf,nnPort4); Set nnAddrsFromCluster=Sets.newHashSet(); for (int i=0; i < 4; i++) { assertTrue(nnAddrsFromCluster.add(cluster.getNameNode(i).getNameNodeAddress())); } Set nnAddrsFromDN=Sets.newHashSet(); for ( BPOfferService bpos : dn.getAllBpOs()) { for ( BPServiceActor bpsa : bpos.getBPServiceActors()) { assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress())); } } assertEquals("",Joiner.on(",").join(Sets.symmetricDifference(nnAddrsFromCluster,nnAddrsFromDN))); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.datanode.TestSimulatedFSDataset

BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testFSDatasetFactory(){ final Configuration conf=new Configuration(); FsDatasetSpi.Factory f=FsDatasetSpi.Factory.getFactory(conf); assertEquals(FsDatasetFactory.class,f.getClass()); assertFalse(f.isSimulated()); SimulatedFSDataset.setFactory(conf); FsDatasetSpi.Factory s=FsDatasetSpi.Factory.getFactory(conf); assertEquals(SimulatedFSDataset.Factory.class,s.getClass()); assertTrue(s.isSimulated()); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testWriteRead() throws IOException { final SimulatedFSDataset fsdataset=getSimulatedFSDataset(); addSomeBlocks(fsdataset); for (int i=1; i <= NUMBLOCKS; ++i) { ExtendedBlock b=new ExtendedBlock(bpid,i,0,0); assertTrue(fsdataset.isValidBlock(b)); assertEquals(blockIdToLen(i),fsdataset.getLength(b)); checkBlockDataAndSize(fsdataset,b,blockIdToLen(i)); } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testInvalidate() throws IOException { final SimulatedFSDataset fsdataset=getSimulatedFSDataset(); int bytesAdded=addSomeBlocks(fsdataset); Block[] deleteBlocks=new Block[2]; deleteBlocks[0]=new Block(1,0,0); deleteBlocks[1]=new Block(2,0,0); fsdataset.invalidate(bpid,deleteBlocks); checkInvalidBlock(new ExtendedBlock(bpid,deleteBlocks[0])); checkInvalidBlock(new ExtendedBlock(bpid,deleteBlocks[1])); long sizeDeleted=blockIdToLen(1) + blockIdToLen(2); assertEquals(bytesAdded - sizeDeleted,fsdataset.getDfsUsed()); assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted,fsdataset.getRemaining()); for (int i=3; i <= NUMBLOCKS; ++i) { Block b=new Block(i,0,0); assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid,b))); } }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testGetBlockReport() throws IOException { SimulatedFSDataset fsdataset=getSimulatedFSDataset(); BlockListAsLongs blockReport=fsdataset.getBlockReport(bpid); assertEquals(0,blockReport.getNumberOfBlocks()); addSomeBlocks(fsdataset); blockReport=fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testGetMetaData() throws IOException { final SimulatedFSDataset fsdataset=getSimulatedFSDataset(); ExtendedBlock b=new ExtendedBlock(bpid,1,5,0); try { assertTrue(fsdataset.getMetaDataInputStream(b) == null); assertTrue("Expected an IO exception",false); } catch ( IOException e) { } addSomeBlocks(fsdataset); b=new ExtendedBlock(bpid,1,0,0); InputStream metaInput=fsdataset.getMetaDataInputStream(b); DataInputStream metaDataInput=new DataInputStream(metaInput); short version=metaDataInput.readShort(); assertEquals(BlockMetadataHeader.VERSION,version); DataChecksum checksum=DataChecksum.newDataChecksum(metaDataInput); assertEquals(DataChecksum.Type.NULL,checksum.getChecksumType()); assertEquals(0,checksum.getChecksumSize()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testInjectionNonEmpty() throws IOException { SimulatedFSDataset fsdataset=getSimulatedFSDataset(); BlockListAsLongs blockReport=fsdataset.getBlockReport(bpid); assertEquals(0,blockReport.getNumberOfBlocks()); int bytesAdded=addSomeBlocks(fsdataset); blockReport=fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); } fsdataset=null; SimulatedFSDataset sfsdataset=getSimulatedFSDataset(); bytesAdded+=addSomeBlocks(sfsdataset,NUMBLOCKS + 1); sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); sfsdataset.injectBlocks(bpid,blockReport); blockReport=sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS * 2,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); assertEquals(blockIdToLen(b.getBlockId()),sfsdataset.getLength(new ExtendedBlock(bpid,b))); } assertEquals(bytesAdded,sfsdataset.getDfsUsed()); assertEquals(sfsdataset.getCapacity() - bytesAdded,sfsdataset.getRemaining()); conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,10); try { sfsdataset=getSimulatedFSDataset(); sfsdataset.addBlockPool(bpid,conf); sfsdataset.injectBlocks(bpid,blockReport); assertTrue("Expected an IO exception",false); } catch ( IOException e) { } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testInjectionEmpty() throws IOException { SimulatedFSDataset fsdataset=getSimulatedFSDataset(); BlockListAsLongs blockReport=fsdataset.getBlockReport(bpid); assertEquals(0,blockReport.getNumberOfBlocks()); int bytesAdded=addSomeBlocks(fsdataset); blockReport=fsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); } SimulatedFSDataset sfsdataset=getSimulatedFSDataset(); sfsdataset.injectBlocks(bpid,blockReport); blockReport=sfsdataset.getBlockReport(bpid); assertEquals(NUMBLOCKS,blockReport.getNumberOfBlocks()); for ( Block b : blockReport) { assertNotNull(b); assertEquals(blockIdToLen(b.getBlockId()),b.getNumBytes()); assertEquals(blockIdToLen(b.getBlockId()),sfsdataset.getLength(new ExtendedBlock(bpid,b))); } assertEquals(bytesAdded,sfsdataset.getDfsUsed()); assertEquals(sfsdataset.getCapacity() - bytesAdded,sfsdataset.getRemaining()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test public void testStorageUsage() throws IOException { final SimulatedFSDataset fsdataset=getSimulatedFSDataset(); assertEquals(fsdataset.getDfsUsed(),0); assertEquals(fsdataset.getRemaining(),fsdataset.getCapacity()); int bytesAdded=addSomeBlocks(fsdataset); assertEquals(bytesAdded,fsdataset.getDfsUsed()); assertEquals(fsdataset.getCapacity() - bytesAdded,fsdataset.getRemaining()); }

Class: org.apache.hadoop.hdfs.server.datanode.TestTransferRbw

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Transfer a replica-being-written (RBW) from one DN to a freshly added DN
 * and verify the new replica matches the original's block id, generation
 * stamp, and visible length.
 */
@Test
public void testTransferRbw() throws Exception {
  final HdfsConfiguration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem fs = cluster.getFileSystem();
    // Write a random amount of data (64KB..128KB) without closing the
    // stream, so the last block stays in the RBW state.
    final Path p = new Path("/foo");
    final int size = (1 << 16) + RAN.nextInt(1 << 16);
    LOG.info("size = " + size);
    final FSDataOutputStream out = fs.create(p, REPLICATION);
    final byte[] bytes = new byte[1024];
    for (int remaining = size; remaining > 0; ) {
      RAN.nextBytes(bytes);
      final int len = bytes.length < remaining ? bytes.length : remaining;
      out.write(bytes, 0, len);
      out.hflush();
      remaining -= len;
    }
    final ReplicaBeingWritten oldrbw;
    final DataNode newnode;
    final DatanodeInfo newnodeinfo;
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    {
      // Grab the RBW from the original DN, then add a new DN to receive it.
      final DataNode oldnode = cluster.getDataNodes().get(0);
      oldrbw = getRbw(oldnode, bpid);
      LOG.info("oldrbw = " + oldrbw);
      cluster.startDataNodes(conf, 1, true, null, null);
      newnode = cluster.getDataNodes().get(REPLICATION);
      final DatanodeInfo oldnodeinfo;
      {
        // Identify which live-report entry is the new DN by matching its
        // registration; the other entry is the old DN.
        final DatanodeInfo[] datatnodeinfos = cluster.getNameNodeRpc()
            .getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertEquals(2, datatnodeinfos.length);
        int i = 0;
        for (DatanodeRegistration dnReg = newnode.getDNRegistrationForBP(bpid);
            i < datatnodeinfos.length && !datatnodeinfos[i].equals(dnReg); i++) ;
        Assert.assertTrue(i < datatnodeinfos.length);
        newnodeinfo = datatnodeinfos[i];
        oldnodeinfo = datatnodeinfos[1 - i];
      }
      // Transfer the RBW (with only the acked bytes) to the new DN.
      final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(),
          oldrbw.getBytesAcked(), oldrbw.getGenerationStamp());
      final BlockOpResponseProto s = DFSTestUtil.transferRbw(b,
          DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
      Assert.assertEquals(Status.SUCCESS, s.getStatus());
    }
    // The transferred replica must mirror the original RBW.
    final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
    LOG.info("newrbw = " + newrbw);
    Assert.assertEquals(oldrbw.getBlockId(), newrbw.getBlockId());
    Assert.assertEquals(oldrbw.getGenerationStamp(), newrbw.getGenerationStamp());
    Assert.assertEquals(oldrbw.getVisibleLength(), newrbw.getVisibleLength());
    LOG.info("DONE");
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.TestAvailableSpaceVolumeChoosingPolicy

EqualityVerifier 
@Test(timeout=60000) public void testNotEnoughSpaceOnSelectedVolume() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy=ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class,null); List volumes=new ArrayList(); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3); initPolicy(policy,0.0f); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,1024L * 1024L * 2)); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=60000) public void testThreeUnbalancedVolumes() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy=ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class,null); List volumes=new ArrayList(); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3); initPolicy(policy,1.0f); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(2),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(2),policy.chooseVolume(volumes,100)); initPolicy(policy,0.0f); Assert.assertEquals(volumes.get(0),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(0),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(0),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(0),policy.chooseVolume(volumes,100)); }

EqualityVerifier 
@Test(timeout=60000) public void testAvailableSpaceChanges() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy=ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class,null); initPolicy(policy,1.0f); List volumes=new ArrayList(); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3).thenReturn(1024L * 1024L * 3).thenReturn(1024L * 1024L * 3).thenReturn(1024L * 1024L * 1); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=60000) public void testTwoUnbalancedVolumes() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy=ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class,null); initPolicy(policy,1.0f); List volumes=new ArrayList(); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L * 3); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=60000) public void testFourUnbalancedVolumes() throws Exception { @SuppressWarnings("unchecked") final AvailableSpaceVolumeChoosingPolicy policy=ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class,null); List volumes=new ArrayList(); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(0).getAvailable()).thenReturn(1024L * 1024L); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(1).getAvailable()).thenReturn(1024L * 1024L + 1); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(2).getAvailable()).thenReturn(1024L * 1024L * 3); volumes.add(Mockito.mock(FsVolumeSpi.class)); Mockito.when(volumes.get(3).getAvailable()).thenReturn(1024L * 1024L * 3); initPolicy(policy,1.0f); Assert.assertEquals(volumes.get(2),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(3),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(2),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(3),policy.chooseVolume(volumes,100)); initPolicy(policy,0.0f); Assert.assertEquals(volumes.get(0),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(0),policy.chooseVolume(volumes,100)); Assert.assertEquals(volumes.get(1),policy.chooseVolume(volumes,100)); }

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestDatanodeRestart

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
@Test public void testRecoverReplicas() throws Exception { Configuration conf=new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024L); conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,512); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); try { FileSystem fs=cluster.getFileSystem(); for (int i=0; i < 4; i++) { Path fileName=new Path("/test" + i); DFSTestUtil.createFile(fs,fileName,1,(short)1,0L); DFSTestUtil.waitReplication(fs,fileName,(short)1); } String bpid=cluster.getNamesystem().getBlockPoolId(); DataNode dn=cluster.getDataNodes().get(0); Iterator replicasItor=dataset(dn).volumeMap.replicas(bpid).iterator(); ReplicaInfo replica=replicasItor.next(); createUnlinkTmpFile(replica,true,true); createUnlinkTmpFile(replica,false,true); replica=replicasItor.next(); createUnlinkTmpFile(replica,true,false); createUnlinkTmpFile(replica,false,false); replica=replicasItor.next(); createUnlinkTmpFile(replica,true,true); createUnlinkTmpFile(replica,false,false); cluster.restartDataNodes(); cluster.waitActive(); dn=cluster.getDataNodes().get(0); Collection replicas=dataset(dn).volumeMap.replicas(bpid); Assert.assertEquals(4,replicas.size()); replicasItor=replicas.iterator(); while (replicasItor.hasNext()) { Assert.assertEquals(ReplicaState.FINALIZED,replicasItor.next().getState()); } } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestFsDatasetImpl

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
@Before public void setUp() throws IOException { final DataNode datanode=Mockito.mock(DataNode.class); storage=Mockito.mock(DataStorage.class); Configuration conf=new Configuration(); final DNConf dnConf=new DNConf(conf); when(datanode.getConf()).thenReturn(conf); when(datanode.getDnConf()).thenReturn(dnConf); createStorageDirs(storage,conf,NUM_INIT_VOLUMES); dataset=new FsDatasetImpl(datanode,storage,conf); assertEquals(NUM_INIT_VOLUMES,dataset.getVolumes().size()); assertEquals(0,dataset.getNumFailedVolumes()); }

IterativeVerifier InternalCallVerifier EqualityVerifier 
@Test public void testAddVolumes() throws IOException { final int numNewVolumes=3; final int numExistingVolumes=dataset.getVolumes().size(); final int totalVolumes=numNewVolumes + numExistingVolumes; List newLocations=new ArrayList(); for (int i=0; i < numNewVolumes; i++) { String path=BASE_DIR + "/newData" + i; newLocations.add(StorageLocation.parse(path)); when(storage.getStorageDir(numExistingVolumes + i)).thenReturn(new Storage.StorageDirectory(new File(path))); } when(storage.getNumStorageDirs()).thenReturn(totalVolumes); dataset.addVolumes(newLocations); assertEquals(totalVolumes,dataset.getVolumes().size()); for (int i=0; i < numNewVolumes; i++) { assertEquals(newLocations.get(i).getFile().getPath(),dataset.getVolumes().get(numExistingVolumes + i).getBasePath()); } }

Class: org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodeProtocol

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test for{@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock,long,long)} */ @Test public void testUpdateReplicaUnderRecovery() throws IOException { MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); String bpid=cluster.getNamesystem().getBlockPoolId(); DistributedFileSystem dfs=cluster.getFileSystem(); String filestr="/foo"; Path filepath=new Path(filestr); DFSTestUtil.createFile(dfs,filepath,1024L,(short)3,0L); final LocatedBlock locatedblock=getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(),filestr); final DatanodeInfo[] datanodeinfo=locatedblock.getLocations(); Assert.assertTrue(datanodeinfo.length > 0); final DataNode datanode=cluster.getDataNode(datanodeinfo[0].getIpcPort()); Assert.assertTrue(datanode != null); final ExtendedBlock b=locatedblock.getBlock(); final long recoveryid=b.getGenerationStamp() + 1; final long newlength=b.getNumBytes() - 1; final FsDatasetSpi fsdataset=DataNodeTestUtils.getFSDataset(datanode); final ReplicaRecoveryInfo rri=fsdataset.initReplicaRecovery(new RecoveringBlock(b,null,recoveryid)); final ReplicaInfo replica=FsDatasetTestUtil.fetchReplicaInfo(fsdataset,bpid,b.getBlockId()); Assert.assertEquals(ReplicaState.RUR,replica.getState()); FsDatasetImpl.checkReplicaFiles(replica); { final ExtendedBlock tmp=new ExtendedBlock(b.getBlockPoolId(),rri.getBlockId(),rri.getNumBytes() - 1,rri.getGenerationStamp()); try { fsdataset.updateReplicaUnderRecovery(tmp,recoveryid,newlength); Assert.fail(); } catch ( IOException ioe) { System.out.println("GOOD: getting " + ioe); } } final String storageID=fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(),rri),recoveryid,newlength); assertTrue(storageID != null); } finally { if (cluster != null) cluster.shutdown(); } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test {@link FsDatasetImpl#initReplicaRecovery(String,ReplicaMap,Block,long,long)}.
 * Covers: normal recovery, recovery of a replica already under recovery with a newer
 * recovery id, rejection of a concurrent recovery with a stale id
 * (RecoveryInProgressException), a block unknown to the datanode (returns null),
 * a recovery id older than the generation stamp (IOException), and a block whose
 * generation stamp is newer than the replica's (IOException with a specific message).
 * Bug fix: the final catch discarded the result of {@code e.getMessage().startsWith(...)}
 * — the message check silently never ran; it is now wrapped in Assert.assertTrue.
 */
@Test public void testInitReplicaRecovery() throws IOException { final long firstblockid=10000L; final long gs=7777L; final long length=22L; final ReplicaMap map=new ReplicaMap(this); String bpid="BP-TEST"; final Block[] blocks=new Block[5]; for (int i=0; i < blocks.length; i++) { blocks[i]=new Block(firstblockid + i,length,gs); map.add(bpid,createReplicaInfo(blocks[i])); } { final Block b=blocks[0]; final ReplicaInfo originalInfo=map.get(bpid,b); final long recoveryid=gs + 1; final ReplicaRecoveryInfo recoveryInfo=FsDatasetImpl.initReplicaRecovery(bpid,map,blocks[0],recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT); assertEquals(originalInfo,recoveryInfo); final ReplicaUnderRecovery updatedInfo=(ReplicaUnderRecovery)map.get(bpid,b); Assert.assertEquals(originalInfo.getBlockId(),updatedInfo.getBlockId()); Assert.assertEquals(recoveryid,updatedInfo.getRecoveryID()); final long recoveryid2=gs + 2; final ReplicaRecoveryInfo recoveryInfo2=FsDatasetImpl.initReplicaRecovery(bpid,map,blocks[0],recoveryid2,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT); assertEquals(originalInfo,recoveryInfo2); final ReplicaUnderRecovery updatedInfo2=(ReplicaUnderRecovery)map.get(bpid,b); Assert.assertEquals(originalInfo.getBlockId(),updatedInfo2.getBlockId()); Assert.assertEquals(recoveryid2,updatedInfo2.getRecoveryID()); try { FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT); Assert.fail(); } catch ( RecoveryInProgressException ripe) { System.out.println("GOOD: getting " + ripe); } } { final long recoveryid=gs + 1; final Block b=new Block(firstblockid - 1,length,gs); ReplicaRecoveryInfo r=FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT); Assert.assertNull("Data-node should not have this replica.",r); } { final long recoveryid=gs - 1; final Block b=new Block(firstblockid + 1,length,gs); try { FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT); Assert.fail(); } catch ( IOException ioe) { System.out.println("GOOD: getting " + ioe); } } { final long recoveryid=gs + 1; final Block b=new Block(firstblockid,length,gs + 1); try { FsDatasetImpl.initReplicaRecovery(bpid,map,b,recoveryid,DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT); fail("InitReplicaRecovery should fail because replica's " + "gs is less than the block's gs"); } catch ( IOException e) { Assert.assertTrue(e.getMessage().startsWith("replica.getGenerationStamp() < block.getGenerationStamp(), block=")); } } }

Class: org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclEntriesOnlyAccess() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0640)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,USER,"bar",READ_WRITE),aclEntry(ACCESS,GROUP,READ_WRITE),aclEntry(ACCESS,OTHER,NONE)); fs.setAcl(path,aclSpec); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"foo")); fs.removeAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bar",READ_WRITE),aclEntry(ACCESS,GROUP,READ_WRITE)},returned); assertPermission((short)010760); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveDefaultAclOnlyDefault() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); fs.removeDefaultAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0750); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAcl() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); fs.removeAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0750); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testSetAclOnlyDefault() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"foo",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010750); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testModifyAclEntriesMinimalDefault() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)); fs.modifyAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010750); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testSetAclMinimal() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0644)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE)); fs.setAcl(path,aclSpec); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE)); fs.setAcl(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0640); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testDefaultAclNewFileWithMode() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0755)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); Path filePath=new Path(path,"file1"); int bufferSize=cluster.getConfiguration(0).getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_DEFAULT); fs.create(filePath,new FsPermission((short)0740),false,bufferSize,fs.getDefaultReplication(filePath),fs.getDefaultBlockSize(path),null).close(); AclStatus s=fs.getAclStatus(filePath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE)},returned); assertPermission(filePath,(short)010740); assertAclFeature(filePath,true); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclOnlyDefault() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); fs.removeAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0750); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclEntriesMinimal() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0760)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_WRITE),aclEntry(ACCESS,OTHER,NONE)); fs.setAcl(path,aclSpec); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"foo"),aclEntry(ACCESS,MASK)); fs.removeAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0760); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testDefaultMinimalAclNewFile() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)); fs.setAcl(path,aclSpec); Path filePath=new Path(path,"file1"); fs.create(filePath).close(); AclStatus s=fs.getAclStatus(filePath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission(filePath,(short)0640); assertAclFeature(filePath,false); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclMinimalAcl() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0640)); fs.removeAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0640); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testSetAclCustomMask() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0640)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_WRITE),aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,MASK,ALL),aclEntry(ACCESS,OTHER,NONE)); fs.setAcl(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ),aclEntry(ACCESS,GROUP,READ)},returned); assertPermission((short)010670); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testSetPermission() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); fs.setPermission(path,FsPermission.createImmutable((short)0700)); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"foo",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010700); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testSetAclStickyBit() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)01750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"foo",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)011770); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testModifyAclEntriesMinimal() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0640)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"foo",READ_WRITE)); fs.modifyAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ_WRITE),aclEntry(ACCESS,GROUP,READ)},returned); assertPermission((short)010660); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclStickyBit() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)01750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); fs.removeAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)01750); assertAclFeature(false); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testSetPermissionCannotSetAclBit() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setPermission(path,FsPermission.createImmutable((short)0700)); assertPermission((short)0700); fs.setPermission(path,new FsAclPermission(FsPermission.createImmutable((short)0755))); INode inode=cluster.getNamesystem().getFSDirectory().getNode(path.toUri().getPath(),false); assertNotNull(inode); FsPermission perm=inode.getFsPermission(); assertNotNull(perm); assertEquals(0755,perm.toShort()); assertEquals(0755,perm.toExtendedShort()); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
@Test public void testDefaultAclNewSymlinkIntermediate() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); Path filePath=new Path(path,"file1"); fs.create(filePath).close(); fs.setPermission(filePath,FsPermission.createImmutable((short)0640)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); Path dirPath=new Path(path,"dir1"); Path linkPath=new Path(dirPath,"link1"); fs.createSymlink(filePath,linkPath,true); AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"foo",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)}; AclStatus s=fs.getAclStatus(dirPath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission(dirPath,(short)010750); assertAclFeature(dirPath,true); expected=new AclEntry[]{}; s=fs.getAclStatus(linkPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission(linkPath,(short)0640); assertAclFeature(linkPath,false); s=fs.getAclStatus(filePath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission(filePath,(short)0640); assertAclFeature(filePath,false); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclEntries() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"foo"),aclEntry(DEFAULT,USER,"foo")); fs.removeAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010750); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testSetAcl() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"foo",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010770); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveAclEntriesOnlyDefault() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL),aclEntry(DEFAULT,USER,"bar",READ_EXECUTE)); fs.setAcl(path,aclSpec); aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo")); fs.removeAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bar",READ_EXECUTE),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010750); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testDefaultAclRenamedDir() throws Exception { Path dirPath=new Path(path,"dir"); FileSystem.mkdirs(fs,dirPath,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(dirPath,aclSpec); Path subdirPath=new Path(path,"subdir"); FileSystem.mkdirs(fs,subdirPath,FsPermission.createImmutable((short)0750)); Path renamedSubdirPath=new Path(dirPath,"subdir"); fs.rename(subdirPath,renamedSubdirPath); AclEntry[] expected=new AclEntry[]{}; AclStatus s=fs.getAclStatus(renamedSubdirPath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission(renamedSubdirPath,(short)0750); assertAclFeature(renamedSubdirPath,false); }

InternalCallVerifier EqualityVerifier 
@Test public void testModifyAclEntriesStickyBit() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)01750)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"foo",READ_EXECUTE),aclEntry(DEFAULT,USER,"foo",READ_EXECUTE)); fs.modifyAclEntries(path,aclSpec); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",READ_EXECUTE),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"foo",READ_EXECUTE),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)011750); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testDefaultAclNewFile() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"foo",ALL)); fs.setAcl(path,aclSpec); Path filePath=new Path(path,"file1"); fs.create(filePath).close(); AclStatus s=fs.getAclStatus(filePath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE)},returned); assertPermission(filePath,(short)010640); assertAclFeature(filePath,true); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveDefaultAclOnlyAccess() throws IOException { fs.create(path).close(); fs.setPermission(path,FsPermission.createImmutable((short)0640)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,OTHER,NONE)); fs.setAcl(path,aclSpec); fs.removeDefaultAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"foo",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE)},returned); assertPermission((short)010770); assertAclFeature(true); }

InternalCallVerifier EqualityVerifier 
@Test public void testRemoveDefaultAclMinimal() throws IOException { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.removeDefaultAcl(path); AclStatus s=fs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{},returned); assertPermission((short)0750); assertAclFeature(false); }

InternalCallVerifier EqualityVerifier 
/**
 * mkdirs of a multi-level path under a directory with a default ACL must
 * propagate the default ACL to every intermediate directory it creates.
 */
@Test
public void testDefaultAclNewDirIntermediate() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List defaultSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, defaultSpec);
  Path childDir = new Path(path, "dir1");
  Path grandchildDir = new Path(childDir, "subdir1");
  // Creates both dir1 and dir1/subdir1 in one call.
  fs.mkdirs(grandchildDir);
  AclEntry[] expected = new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE)};
  // Intermediate directory inherits the full ACL, access and default scopes.
  AclStatus status = fs.getAclStatus(childDir);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(childDir, (short) 010750);
  assertAclFeature(childDir, true);
  // The leaf directory inherits the same ACL.
  status = fs.getAclStatus(grandchildDir);
  actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(grandchildDir, (short) 010750);
  assertAclFeature(grandchildDir, true);
}

InternalCallVerifier EqualityVerifier 
/**
 * setPermission on a file with an access ACL must update the ACL's
 * owner/group/other entries (group bits map to the mask) without disturbing
 * named entries.
 */
@Test
public void testSetPermissionOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List accessSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, accessSpec);
  // Tighten the mode after the ACL is in place.
  fs.setPermission(path, FsPermission.createImmutable((short) 0600));
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ)}, actual);
  assertPermission((short) 010600);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * Renaming an existing file into a directory with a default ACL must NOT
 * apply the default ACL to the moved file — default ACLs affect only newly
 * created children.
 */
@Test
public void testDefaultAclRenamedFile() throws Exception {
  Path targetDir = new Path(path, "dir");
  FileSystem.mkdirs(fs, targetDir, FsPermission.createImmutable((short) 0750));
  List defaultSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(targetDir, defaultSpec);
  Path sourceFile = new Path(path, "file1");
  fs.create(sourceFile).close();
  fs.setPermission(sourceFile, FsPermission.createImmutable((short) 0640));
  Path movedFile = new Path(targetDir, "file1");
  fs.rename(sourceFile, movedFile);
  AclEntry[] expected = new AclEntry[]{};
  AclStatus status = fs.getAclStatus(movedFile);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  // The file keeps its original mode and never gains an ACL feature.
  assertPermission(movedFile, (short) 0640);
  assertAclFeature(movedFile, false);
}

InternalCallVerifier EqualityVerifier 
/**
 * modifyAclEntries with only access-scope entries updates the matching named
 * entry and recomputes the mask, leaving no default entries behind.
 */
@Test
public void testModifyAclEntriesOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, initialSpec);
  List modifySpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, modifySpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE)}, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * A directory created under a parent with a default ACL inherits that ACL in
 * both scopes: as its access ACL and as its own default ACL.
 */
@Test
public void testDefaultAclNewDir() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List defaultSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, defaultSpec);
  Path childDir = new Path(path, "dir1");
  fs.mkdirs(childDir);
  AclStatus status = fs.getAclStatus(childDir);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission(childDir, (short) 010750);
  assertAclFeature(childDir, true);
}

InternalCallVerifier EqualityVerifier 
/**
 * An access-only ACL on the parent must not be inherited: a new file under it
 * gets plain permission bits and no ACL feature.
 */
@Test
public void testOnlyAccessAclNewFile() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List accessSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", ALL));
  fs.modifyAclEntries(path, accessSpec);
  Path childFile = new Path(path, "file1");
  fs.create(childFile).close();
  AclStatus status = fs.getAclStatus(childFile);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{}, actual);
  // Default create mode applies; no ACL bit in the permission.
  assertPermission(childFile, (short) 0644);
  assertAclFeature(childFile, false);
}

InternalCallVerifier EqualityVerifier 
/**
 * Removing all extended entries (named user + mask) from both scopes leaves a
 * minimal default ACL consisting of the base user/group/other entries.
 */
@Test
public void testRemoveAclEntriesMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  // Removal spec names entries without permissions — only type/name matter.
  List removeSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(ACCESS, MASK),
      aclEntry(DEFAULT, USER, "foo"),
      aclEntry(DEFAULT, MASK));
  fs.removeAclEntries(path, removeSpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * setAcl with only access-scope entries stores the named/group entries and
 * encodes the computed mask into the group bits of the permission.
 */
@Test
public void testSetAclOnlyAccess() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List accessSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, READ_WRITE),
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  fs.setAcl(path, accessSpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", READ),
      aclEntry(ACCESS, GROUP, READ)}, actual);
  assertPermission((short) 010640);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * Supplying an explicit mask in modifyAclEntries overrides the automatic mask
 * recalculation; the NONE mask lands in the group bits of the mode.
 */
@Test
public void testModifyAclEntriesCustomMask() throws IOException {
  fs.create(path).close();
  fs.setPermission(path, FsPermission.createImmutable((short) 0640));
  List modifySpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, MASK, NONE));
  fs.modifyAclEntries(path, modifySpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ)}, actual);
  assertPermission((short) 010600);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * modifyAclEntries restricted to the default scope updates the default named
 * entry and its mask while leaving the access permission bits untouched.
 */
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List initialSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  List modifySpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, modifySpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * Creating a file through a not-yet-existing intermediate directory under a
 * default ACL: the intermediate directory inherits both scopes, the file
 * inherits only access-scope entries.
 */
@Test
public void testDefaultAclNewFileIntermediate() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List defaultSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, defaultSpec);
  Path childDir = new Path(path, "dir1");
  Path childFile = new Path(childDir, "file1");
  // create() implicitly makes dir1 as well.
  fs.create(childFile).close();
  AclEntry[] expected = new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE)};
  AclStatus status = fs.getAclStatus(childDir);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(childDir, (short) 010750);
  assertAclFeature(childDir, true);
  // The file carries only the access-scope portion of the inherited ACL.
  expected = new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE)};
  status = fs.getAclStatus(childFile);
  actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, actual);
  assertPermission(childFile, (short) 010640);
  assertAclFeature(childFile, true);
}

InternalCallVerifier EqualityVerifier 
/**
 * An access-only ACL on the parent is not inherited by a new subdirectory:
 * the child gets plain default mode bits and no ACL feature.
 */
@Test
public void testOnlyAccessAclNewDir() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List accessSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "foo", ALL));
  fs.modifyAclEntries(path, accessSpec);
  Path childDir = new Path(path, "dir1");
  fs.mkdirs(childDir);
  AclStatus status = fs.getAclStatus(childDir);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{}, actual);
  assertPermission(childDir, (short) 0755);
  assertAclFeature(childDir, false);
}

InternalCallVerifier EqualityVerifier 
/**
 * mkdirs with an explicit mode under a default ACL: the requested mode is
 * filtered against the inherited entries (other bits come from the mode).
 */
@Test
public void testDefaultAclNewDirWithMode() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0755));
  List defaultSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, defaultSpec);
  Path childDir = new Path(path, "dir1");
  fs.mkdirs(childDir, new FsPermission((short) 0740));
  AclStatus status = fs.getAclStatus(childDir);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ_EXECUTE)}, actual);
  assertPermission(childDir, (short) 010740);
  assertAclFeature(childDir, true);
}

InternalCallVerifier EqualityVerifier 
/**
 * removeAclEntries on a sticky-bit directory must keep the sticky bit while
 * dropping the named entries from both scopes.
 */
@Test
public void testRemoveAclEntriesStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  // Removal entries carry no permissions; type/name identify the victims.
  List removeSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo"),
      aclEntry(DEFAULT, USER, "foo"));
  fs.removeAclEntries(path, removeSpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission((short) 011750);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * A minimal default ACL (just user/group/other, no named entries) is still
 * copied to a new subdirectory as its default ACL.
 */
@Test
public void testDefaultMinimalAclNewDir() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List defaultSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE));
  fs.setAcl(path, defaultSpec);
  Path childDir = new Path(path, "dir1");
  fs.mkdirs(childDir);
  AclStatus status = fs.getAclStatus(childDir);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission(childDir, (short) 010750);
  assertAclFeature(childDir, true);
}

InternalCallVerifier EqualityVerifier 
/**
 * removeDefaultAcl strips all default-scope entries but leaves the access ACL
 * (and its mask-derived group bits) intact.
 */
@Test
public void testRemoveDefaultAcl() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  fs.removeDefaultAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE)}, actual);
  assertPermission((short) 010770);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * setAcl with a minimal default ACL stores exactly those three default
 * entries and sets the ACL bit on the permission.
 */
@Test
public void testSetAclMinimalDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List defaultSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE));
  fs.setAcl(path, defaultSpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * setPermission on a directory whose ACL has only a default named entry must
 * change the mode bits without disturbing the default-scope entries.
 */
@Test
public void testSetPermissionOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  fs.setPermission(path, FsPermission.createImmutable((short) 0700));
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission((short) 010700);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * modifyAclEntries spanning both scopes: the named-user entry is downgraded
 * in the access and default ACLs, and both masks are recomputed.
 */
@Test
public void testModifyAclEntries() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  List modifySpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, modifySpec);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, MASK, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, NONE)}, actual);
  assertPermission((short) 010750);
  assertAclFeature(true);
}

InternalCallVerifier EqualityVerifier 
/**
 * removeDefaultAcl on a sticky-bit directory preserves the sticky bit while
 * stripping the default-scope entries.
 */
@Test
public void testRemoveDefaultAclStickyBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 01750));
  List initialSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, initialSpec);
  fs.removeDefaultAcl(path);
  AclStatus status = fs.getAclStatus(path);
  AclEntry[] actual = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{
      aclEntry(ACCESS, USER, "foo", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE)}, actual);
  // Sticky bit (01000) and ACL bit (010000) are both still present.
  assertPermission((short) 011770);
  assertAclFeature(true);
}

Class: org.apache.hadoop.hdfs.server.namenode.FSXAttrBaseTest

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests for setting xattr:
 * 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
 * 2. Set xattr with illegal name (null, empty, missing/invalid namespace prefix).
 * 3. Set xattr without XAttrSetFlag (defaults apply).
 * 4. Set xattr and total number exceeds max limit.
 * 5. Set xattr and name is too long.
 * 6. Set xattr and value is too long.
 * NOTE(review): each illegal-name case catches both RemoteException and the
 * direct exception type, presumably because the concrete FileSystem under test
 * may be either remote (RPC) or local — confirm against subclass setups.
 */
@Test(timeout=120000) public void testSetXAttr() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Map xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(value1,xattrs.get(name1)); fs.removeXAttr(path,name1); try { fs.setXAttr(path,null,value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with null name should fail."); } catch ( NullPointerException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be null",e); } catch ( RemoteException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be null",e); } try { fs.setXAttr(path,"user.",value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with empty name should fail."); } catch ( RemoteException e) { assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName()); GenericTestUtils.assertExceptionContains("XAttr name cannot be empty",e); } catch ( HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains("XAttr name cannot be empty",e); } try { fs.setXAttr(path,"a1",value1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); Assert.fail("Setting xattr with invalid name prefix or without " + "name prefix should fail."); } catch ( RemoteException e) { assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName()); GenericTestUtils.assertExceptionContains("XAttr name must be prefixed",e); } catch ( HadoopIllegalArgumentException e) { 
// Fall-through branch for non-RPC FileSystems: same prefix-validation message.
GenericTestUtils.assertExceptionContains("XAttr name must be prefixed",e); } fs.setXAttr(path,name1,value1); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(value1,xattrs.get(name1)); fs.removeXAttr(path,name1); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name1,newValue1,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE)); xattrs=fs.getXAttrs(path); Assert.assertEquals(xattrs.size(),1); Assert.assertArrayEquals(newValue1,xattrs.get(name1)); fs.removeXAttr(path,name1); fs.setXAttr(path,name1,value1); fs.setXAttr(path,name2,value2); fs.setXAttr(path,name3,null); try { fs.setXAttr(path,name4,null); Assert.fail("Setting xattr should fail if total number of xattrs " + "for inode exceeds max limit."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Cannot add additional XAttr",e); } fs.removeXAttr(path,name1); fs.removeXAttr(path,name2); fs.removeXAttr(path,name3); String longName="user.0123456789abcdefX"; try { fs.setXAttr(path,longName,null); Assert.fail("Setting xattr should fail if name is too long."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big",e); GenericTestUtils.assertExceptionContains("total size is 17",e); } byte[] longValue=new byte[MAX_SIZE]; try { fs.setXAttr(path,"user.a",longValue); Assert.fail("Setting xattr should fail if value is too long."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("XAttr is too big",e); GenericTestUtils.assertExceptionContains("total size is 17",e); } String name="user.111"; byte[] value=new byte[MAX_SIZE - 3]; fs.setXAttr(path,name,value); }

InternalCallVerifier EqualityVerifier 
/**
 * Steps:
 * 1) Set xattrs on a file.
 * 2) Remove xattrs from that file.
 * 3) Save a checkpoint and restart NN.
 * 4) Set xattrs again on the same file.
 * 5) Remove xattrs from that file.
 * 6) Restart NN without saving a checkpoint.
 * 7) Set xattrs again on the same file.
 * Verifies removed xattrs never resurface across fsimage or edit-log replay.
 */
@Test(timeout=120000)
public void testCleanupXAttrs() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  // Restart after saving a checkpoint (fsimage path).
  restart(true);
  initFileSystem();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  // Restart without a checkpoint (edit-log replay path).
  restart(false);
  initFileSystem();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  Map xattrs = fs.getXAttrs(path);
  // Fixed JUnit argument order: expected value first, actual second.
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests for removing xattr:
 * 1. Remove xattr.
 * 2. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000)
public void testRemoveXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  Map xattrs = fs.getXAttrs(path);
  // Fixed JUnit argument order: expected value first, actual second.
  Assert.assertEquals(1, xattrs.size());
  // A null value is persisted as an empty byte array.
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // Removals must survive an edit-log-only restart...
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // ...and a checkpoint restart.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  fs.removeXAttr(path, name3);
}

InternalCallVerifier EqualityVerifier 
/**
 * Renaming an inode must carry its xattrs along to the new path.
 */
@Test(timeout=120000)
public void testRenameFileWithXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  Path renamePath = new Path(path.toString() + "-rename");
  fs.rename(path, renamePath);
  Map xattrs = fs.getXAttrs(renamePath);
  // Fixed JUnit argument order: expected value first, actual second.
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  fs.removeXAttr(renamePath, name1);
  fs.removeXAttr(renamePath, name2);
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests for replacing xattr:
 * 1. Replace an xattr using XAttrSetFlag.REPLACE.
 * 2. Replace an xattr which doesn't exist and expect an exception.
 * 3. Create multiple xattrs and replace some.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000)
public void testReplaceXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
  Map xattrs = fs.getXAttrs(path);
  // Fixed JUnit argument order: expected value first, actual second.
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  // REPLACE on a non-existent xattr must fail.
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
    Assert.fail("Replacing xattr which does not exist should fail.");
  } catch (IOException e) {
  }
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  // Replacing with null stores an empty value.
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  // Replacements must survive an edit-log-only restart...
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  // ...and a checkpoint restart.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * removexattr tests. Test that removexattr throws an exception if any of
 * the following are true:
 * - an xattr that was requested doesn't exist
 * - the caller specifies an unknown namespace
 * - the caller doesn't have access to the namespace
 * - the caller doesn't have permission to get the value of the xattr
 * - the caller does not have "execute" (scan) access to the parent directory
 * - the caller has only read access to the owning directory
 * - the caller has only execute access to the owning directory and execute
 *   access to the actual entity
 * - the caller does not have execute access to the owning directory and write
 *   access to the actual entity
 * NOTE(review): the sequence of setPermission calls below encodes the
 * permission matrix above; statement order is load-bearing — each doAs block
 * is evaluated against the permissions set immediately before it.
 */
@Test(timeout=120000) public void testRemoveXAttrPermissions() throws Exception { FileSystem.mkdirs(fs,path,FsPermission.createImmutable((short)0750)); fs.setXAttr(path,name1,value1,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name2,value2,EnumSet.of(XAttrSetFlag.CREATE)); fs.setXAttr(path,name3,null,EnumSet.of(XAttrSetFlag.CREATE)); try { fs.removeXAttr(path,name2); fs.removeXAttr(path,name2); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("No matching attributes found",e); } final String expectedExceptionString="An XAttr name must be prefixed " + "with user/trusted/security/system/raw, followed by a '.'"; try { fs.removeXAttr(path,"wackynamespace.foo"); Assert.fail("expected IOException"); } catch ( RemoteException e) { assertEquals("Unexpected RemoteException: " + e,e.getClassName(),HadoopIllegalArgumentException.class.getCanonicalName()); GenericTestUtils.assertExceptionContains(expectedExceptionString,e); } catch ( HadoopIllegalArgumentException e) { GenericTestUtils.assertExceptionContains(expectedExceptionString,e); } final UserGroupInformation user=UserGroupInformation.createUserForTesting("user",new String[]{"mygroup"}); fs.setXAttr(path,"trusted.foo","1234".getBytes()); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() 
throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(path,"trusted.foo"); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("User doesn't have permission",e); } finally { fs.removeXAttr(path,"trusted.foo"); } fs.setPermission(path,new FsPermission((short)0700)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(path,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } final Path childDir=new Path(path,"child" + pathCount); FileSystem.mkdirs(fs,childDir,FsPermission.createImmutable((short)0700)); fs.setXAttr(childDir,name1,"1234".getBytes()); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0704)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0701)); fs.setPermission(childDir,new FsPermission((short)0701)); try { user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); Assert.fail("expected IOException"); } catch ( IOException e) { 
// Success case: parent execute (0701) + child write-for-other (0706) permits removal.
GenericTestUtils.assertExceptionContains("Permission denied",e); } fs.setPermission(path,new FsPermission((short)0701)); fs.setPermission(childDir,new FsPermission((short)0706)); user.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws Exception { final FileSystem userFs=dfsCluster.getFileSystem(); userFs.removeXAttr(childDir,name1); return null; } } ); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests for creating xattr:
 * 1. Create an xattr using XAttrSetFlag.CREATE.
 * 2. Create an xattr which already exists and expect an exception.
 * 3. Create multiple xattrs.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout=120000)
public void testCreateXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  Map xattrs = fs.getXAttrs(path);
  // Fixed JUnit argument order: expected value first, actual second.
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  fs.removeXAttr(path, name1);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(0, xattrs.size());
  // CREATE on an existing xattr must fail.
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    Assert.fail("Creating xattr which already exists should fail.");
  } catch (IOException e) {
  }
  fs.removeXAttr(path, name1);
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  // A null value is stored as an empty byte array.
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.CREATE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  // Creations must survive an edit-log-only restart...
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  // ...and a checkpoint restart.
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the "raw.*" xattr namespace: the admin can create, replace, get and
 * list raw xattrs through the /.reserved/raw path (and they are invisible
 * through the regular path), while an ordinary user is denied any raw xattr
 * operation on either path.
 */
@Test(timeout = 120000)
public void testRawXAttrs() throws Exception {
  final UserGroupInformation user = UserGroupInformation.createUserForTesting(
      "user", new String[] { "mygroup" });

  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(rawPath, raw1, value1,
      EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));

  {
    // Admin reads the raw xattr back through the raw path.
    final byte[] value = fs.getXAttr(rawPath, raw1);
    Assert.assertArrayEquals(value1, value);
  }

  {
    final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
    Assert.assertEquals(1, xattrs.size());
    Assert.assertArrayEquals(value1, xattrs.get(raw1));
    fs.removeXAttr(rawPath, raw1);
  }

  {
    // REPLACE overwrites an existing raw xattr value.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw1, newValue1,
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
    Assert.assertEquals(1, xattrs.size());
    Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
    fs.removeXAttr(rawPath, raw1);
  }

  {
    // Raw xattrs are listed through the raw path ...
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    final List<String> xattrNames = fs.listXAttrs(rawPath);
    assertTrue(xattrNames.contains(raw1));
    assertTrue(xattrNames.contains(raw2));
    Assert.assertEquals(2, xattrNames.size());
    fs.removeXAttr(rawPath, raw1);
    fs.removeXAttr(rawPath, raw2);
  }

  {
    // ... but hidden when listing the equivalent non-raw path.
    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
    fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
    final List<String> xattrNames = fs.listXAttrs(path);
    Assert.assertEquals(0, xattrNames.size());
    fs.removeXAttr(rawPath, raw1);
    fs.removeXAttr(rawPath, raw2);
  }

  {
    // An ordinary user may neither set nor get raw xattrs, on either path.
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        try {
          userFs.setXAttr(path, raw1, value1);
          fail("setXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.setXAttr(rawPath, raw1, value1);
          fail("setXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttrs(rawPath);
          fail("getXAttrs should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttrs(path);
          fail("getXAttrs should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(rawPath, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(path, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        return null;
      }
    });
  }

  {
    // Even with a raw xattr present, the ordinary user cannot observe it:
    // gets throw, the non-raw listing is empty, and the raw listing throws.
    fs.setXAttr(rawPath, raw1, value1);
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        try {
          userFs.getXAttr(rawPath, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        try {
          userFs.getXAttr(path, raw1);
          fail("getXAttr should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        final List<String> xattrNames = userFs.listXAttrs(path);
        Assert.assertEquals(0, xattrNames.size());
        try {
          userFs.listXAttrs(rawPath);
          fail("listXAttrs on raw path should have thrown");
        } catch (AccessControlException e) {
          // expected
        }
        return null;
      }
    });
    fs.removeXAttr(rawPath, raw1);
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that ACL entries gate xattr operations: Diana can neither get nor
 * set/remove xattrs on Bruce's directory until Bruce grants her READ (enables
 * get) and then ALL (enables set and remove) via modifyAclEntries.
 */
@Test(timeout = 120000)
public void testXAttrAcl() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setOwner(path, BRUCE.getUserName(), null);
  FileSystem fsAsBruce = createFileSystem(BRUCE);
  FileSystem fsAsDiana = createFileSystem(DIANA);
  fsAsBruce.setXAttr(path, name1, value1);

  // With no ACL entry for Diana, reading xattrs is denied.
  Map<String, byte[]> xattrs;
  try {
    xattrs = fsAsDiana.getXAttrs(path);
    Assert.fail("Diana should not have read access to get xattrs");
  } catch (AccessControlException e) {
    // expected
  }

  // READ is enough for Diana to get xattrs ...
  fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
      aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
  xattrs = fsAsDiana.getXAttrs(path);
  Assert.assertArrayEquals(value1, xattrs.get(name1));

  // ... but not to remove or set them.
  try {
    fsAsDiana.removeXAttr(path, name1);
    Assert.fail("Diana should not have write access to remove xattrs");
  } catch (AccessControlException e) {
    // expected
  }
  try {
    fsAsDiana.setXAttr(path, name2, value2);
    Assert.fail("Diana should not have write access to set xattrs");
  } catch (AccessControlException e) {
    // expected
  }

  // With ALL, Diana can set, get and remove xattrs.
  fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
      aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
  fsAsDiana.setXAttr(path, name2, value2);
  Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
  fsAsDiana.removeXAttr(path, name1);
  fsAsDiana.removeXAttr(path, name2);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestAclTransformation

EqualityVerifier 
/**
 * Removing the named entry that carried write access must trigger a
 * recalculated access mask (down to READ, the union of the remaining
 * group-class entries).
 */
@Test
public void testFilterAclEntriesByAclSpecAccessMaskCalculated()
    throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, USER, "diana", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, READ_WRITE),
      aclEntry(ACCESS, OTHER, READ));
  List toRemove = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana"));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, READ),
      aclEntry(ACCESS, OTHER, READ));
  assertEquals(want, filterAclEntriesByAclSpec(initial, toRemove));
}

EqualityVerifier 
/**
 * Merging default entries without a default group entry must synthesize one
 * by copying the permissions of the access group entry.
 */
@Test
public void testMergeAclEntriesAutomaticDefaultGroup() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ));
  List spec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, READ));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, OTHER, READ));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Replacing only the access entries must recalculate the access mask (ALL,
 * from the new group entry) while leaving every existing default entry —
 * including the default mask — untouched.
 */
@Test public void testReplaceAclEntriesDefaultMaskPreserved() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ_WRITE)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"diana",ALL)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",READ),aclEntry(ACCESS,USER,"diana",READ_WRITE),aclEntry(ACCESS,GROUP,ALL),aclEntry(ACCESS,OTHER,READ)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,ALL)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"diana",ALL)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Filtering out one named user and one named group must drop exactly those
 * entries and recalculate the access mask from what remains (READ_WRITE).
 */
@Test
public void testFilterAclEntriesByAclSpec() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "execs", READ_WRITE),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, READ));
  List toRemove = Lists.newArrayList(
      aclEntry(ACCESS, USER, "diana"),
      aclEntry(ACCESS, GROUP, "sales"));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, GROUP, "execs", READ_WRITE),
      aclEntry(ACCESS, MASK, READ_WRITE),
      aclEntry(ACCESS, OTHER, READ));
  assertEquals(want, filterAclEntriesByAclSpec(initial, toRemove));
}

EqualityVerifier 
/**
 * Removing the default named entry that carried write access must
 * recalculate the default mask down to READ; access entries are untouched.
 */
@Test public void testFilterAclEntriesByAclSpecDefaultMaskCalculated() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,USER,"diana",READ_WRITE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"diana")); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec)); }

EqualityVerifier 
/**
 * When the spec supplies an explicit access mask, that mask is used as-is
 * instead of being calculated from the group-class entries.
 */
@Test
public void testMergeAclEntriesProvidedAccessMask() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  List spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, MASK, ALL));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Replacing with a complete spec (access and default entries, masks
 * included) must yield exactly the spec entries in canonical order.
 */
@Test public void testReplaceAclEntries() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",ALL)).add(aclEntry(ACCESS,GROUP,READ_EXECUTE)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",READ_WRITE),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"sales",ALL),aclEntry(ACCESS,MASK,ALL),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",READ_WRITE),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"sales",ALL),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ_EXECUTE)).add(aclEntry(ACCESS,GROUP,"sales",ALL)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ_WRITE)).add(aclEntry(DEFAULT,GROUP,READ_EXECUTE)).add(aclEntry(DEFAULT,GROUP,"sales",ALL)).add(aclEntry(DEFAULT,MASK,ALL)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Merging an access-scope entry must introduce a calculated access mask
 * (READ_EXECUTE) while the pre-existing default mask stays READ, untouched.
 */
@Test public void testMergeAclEntriesDefaultMaskPreserved() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"diana",ALL)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"diana",FsAction.READ_EXECUTE)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"diana",READ_EXECUTE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ_EXECUTE)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"diana",ALL)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,mergeAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Replacing with a spec containing only a default named user must keep the
 * access entries and synthesize the missing default user/group/mask/other
 * entries from the access ACL.
 */
@Test
public void testReplaceAclEntriesOnlyDefaults() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  List spec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, "bruce", READ));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, MASK, READ),
      aclEntry(DEFAULT, OTHER, NONE));
  assertEquals(want, replaceAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Merging two default named users must recalculate the default mask to the
 * union of the default group-class entries (ALL here).
 */
@Test public void testMergeAclEntriesDefaultMaskCalculated() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"bruce",READ_WRITE),aclEntry(DEFAULT,USER,"diana",READ_EXECUTE)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ_WRITE)).add(aclEntry(DEFAULT,USER,"diana",READ_EXECUTE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,ALL)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,mergeAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Removing the unnamed default user entry must regenerate it automatically
 * by copying the access user entry's permissions (ALL).
 */
@Test public void testFilterAclEntriesByAclSpecAutomaticDefaultUser() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,READ_WRITE)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec)); }

EqualityVerifier 
/**
 * Replacing an ACL with a spec identical to it must return the same list of
 * entries (no spurious reordering or mask recalculation).
 */
@Test public void testReplaceAclEntriesUnchanged() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",ALL)).add(aclEntry(ACCESS,GROUP,READ_EXECUTE)).add(aclEntry(ACCESS,GROUP,"sales",ALL)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",ALL)).add(aclEntry(DEFAULT,GROUP,READ_EXECUTE)).add(aclEntry(DEFAULT,GROUP,"sales",ALL)).add(aclEntry(DEFAULT,MASK,ALL)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"sales",ALL),aclEntry(ACCESS,MASK,ALL),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"sales",ALL),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)); assertEquals(existing,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Merging several new named users plus an update to an existing one must
 * keep named entries sorted by name and apply the updated permission.
 */
@Test
public void testMergeAclEntriesMultipleNewBeforeExisting()
    throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "diana", READ),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, MASK, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE));
  List spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, USER, "clark", READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, USER, "clark", READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, MASK, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Filtering with a spec that names entries not present in the ACL must
 * leave the ACL unchanged.
 */
@Test
public void testFilterAclEntriesByAclSpecUnchanged() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "sales", ALL),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE));
  List absentEntries = Lists.newArrayList(
      aclEntry(ACCESS, USER, "clark"),
      aclEntry(ACCESS, GROUP, "execs"));
  assertEquals(initial, filterAclEntriesByAclSpec(initial, absentEntries));
}

EqualityVerifier 
/**
 * Replacing with default entries but no default other entry must synthesize
 * one by copying the access other entry's permissions (NONE).
 */
@Test public void testReplaceAclEntriesAutomaticDefaultOther() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,READ_WRITE),aclEntry(DEFAULT,USER,"bruce",READ),aclEntry(DEFAULT,GROUP,READ_WRITE),aclEntry(DEFAULT,MASK,READ_WRITE)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,READ_WRITE)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ_WRITE)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Merging an empty spec must be a no-op: the ACL comes back unchanged.
 */
@Test
public void testMergeAclEntriesEmptyAclSpec() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ_WRITE),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, READ));
  List emptySpec = Lists.newArrayList();
  assertEquals(initial, mergeAclEntries(initial, emptySpec));
}

EqualityVerifier 
/**
 * Merging a spec identical to the existing ACL must return the same list of
 * entries unchanged.
 */
@Test public void testMergeAclEntriesUnchanged() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",ALL)).add(aclEntry(ACCESS,GROUP,READ_EXECUTE)).add(aclEntry(ACCESS,GROUP,"sales",ALL)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",ALL)).add(aclEntry(DEFAULT,GROUP,READ_EXECUTE)).add(aclEntry(DEFAULT,GROUP,"sales",ALL)).add(aclEntry(DEFAULT,MASK,ALL)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",ALL),aclEntry(ACCESS,GROUP,READ_EXECUTE),aclEntry(ACCESS,GROUP,"sales",ALL),aclEntry(ACCESS,MASK,ALL),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",ALL),aclEntry(DEFAULT,GROUP,READ_EXECUTE),aclEntry(DEFAULT,GROUP,"sales",ALL),aclEntry(DEFAULT,MASK,ALL),aclEntry(DEFAULT,OTHER,NONE)); assertEquals(existing,mergeAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * filterDefaultAclEntries must strip every DEFAULT-scope entry and leave the
 * ACCESS-scope entries exactly as they were.
 */
@Test public void testFilterDefaultAclEntries() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ_EXECUTE)).add(aclEntry(ACCESS,GROUP,"sales",READ_EXECUTE)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ_WRITE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,GROUP,"sales",READ_EXECUTE)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,READ_EXECUTE)).build(); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ_EXECUTE)).add(aclEntry(ACCESS,GROUP,"sales",READ_EXECUTE)).add(aclEntry(ACCESS,MASK,ALL)).add(aclEntry(ACCESS,OTHER,NONE)).build(); assertEquals(expected,filterDefaultAclEntries(existing)); }

EqualityVerifier 
/**
 * Merging a single named-user entry into a minimal ACL must insert it in
 * canonical position and introduce a calculated access mask (ALL).
 */
@Test
public void testMergeAclEntries() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, NONE));
  List spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", ALL));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, MASK, ALL),
      aclEntry(ACCESS, OTHER, NONE));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Removing a default named entry must recalculate only the default mask; the
 * access entries and the existing access mask (READ) stay untouched.
 */
@Test public void testFilterAclEntriesByAclSpecAccessMaskPreserved() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,USER,"diana",READ_WRITE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"diana")); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec)); }

EqualityVerifier 
/**
 * When the spec supplies an explicit default mask, that mask is used as-is
 * rather than being calculated from the default group-class entries.
 */
@Test
public void testMergeAclEntriesProvidedDefaultMask() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE));
  List spec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, NONE),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, MASK, ALL),
      aclEntry(DEFAULT, OTHER, NONE));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Replacing only the default entries must recalculate the default mask from
 * the new default group-class entries; the access entries and the access
 * mask (READ) stay untouched.
 */
@Test public void testReplaceAclEntriesAccessMaskPreserved() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,USER,"diana",READ_WRITE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",READ),aclEntry(DEFAULT,GROUP,READ),aclEntry(DEFAULT,OTHER,NONE)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Merging an update to a default named entry must recalculate the default
 * mask (to READ_EXECUTE); the access entries and access mask stay untouched.
 */
@Test public void testMergeAclEntriesAccessMaskPreserved() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,USER,"diana",READ_WRITE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"diana",READ_EXECUTE)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,USER,"diana",READ_EXECUTE)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ_EXECUTE)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,mergeAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Removing an access named entry must recalculate the access mask (to READ);
 * the default entries, including the default mask, stay untouched.
 */
@Test public void testFilterAclEntriesByAclSpecDefaultMaskPreserved() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,USER,"diana",READ_WRITE)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ_WRITE)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"diana",ALL)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"diana")); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,USER,"bruce",READ)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,MASK,READ)).add(aclEntry(ACCESS,OTHER,READ)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"diana",ALL)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,NONE)).build(); assertEquals(expected,filterAclEntriesByAclSpec(existing,aclSpec)); }

EqualityVerifier 
/**
 * Merging an updated and a new named user must recalculate the access mask
 * to the union of the group-class entries (READ_EXECUTE).
 */
@Test
public void testMergeAclEntriesAccessMaskCalculated() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, READ),
      aclEntry(ACCESS, OTHER, READ));
  List spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
      aclEntry(ACCESS, USER, "diana", READ),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, READ_EXECUTE),
      aclEntry(ACCESS, OTHER, READ));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Removing the unnamed default other entry must regenerate it automatically
 * by copying the access other entry's permissions (READ).
 */
@Test
public void testFilterAclEntriesByAclSpecAutomaticDefaultOther()
    throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, READ_WRITE),
      aclEntry(DEFAULT, GROUP, READ_WRITE),
      aclEntry(DEFAULT, OTHER, NONE));
  List toRemove = Lists.newArrayList(
      aclEntry(DEFAULT, OTHER));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, READ_WRITE),
      aclEntry(DEFAULT, GROUP, READ_WRITE),
      aclEntry(DEFAULT, OTHER, READ));
  assertEquals(want, filterAclEntriesByAclSpec(initial, toRemove));
}

EqualityVerifier 
/**
 * Replacing with default entries but no unnamed default user entry must
 * synthesize one by copying the access user entry's permissions (ALL).
 */
@Test public void testReplaceAclEntriesAutomaticDefaultUser() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,"bruce",READ),aclEntry(DEFAULT,GROUP,READ_WRITE),aclEntry(DEFAULT,MASK,READ_WRITE),aclEntry(DEFAULT,OTHER,READ)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,ALL)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ_WRITE)).add(aclEntry(DEFAULT,MASK,READ_WRITE)).add(aclEntry(DEFAULT,OTHER,READ)).build(); assertEquals(expected,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Replacing with default entries but no unnamed default group entry must
 * synthesize one by copying the access group entry's permissions (READ).
 */
@Test public void testReplaceAclEntriesAutomaticDefaultGroup() throws AclException { List existing=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,NONE)).build(); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,GROUP,READ),aclEntry(ACCESS,OTHER,NONE),aclEntry(DEFAULT,USER,READ_WRITE),aclEntry(DEFAULT,USER,"bruce",READ),aclEntry(DEFAULT,MASK,READ),aclEntry(DEFAULT,OTHER,READ)); List expected=new ImmutableList.Builder().add(aclEntry(ACCESS,USER,ALL)).add(aclEntry(ACCESS,GROUP,READ)).add(aclEntry(ACCESS,OTHER,NONE)).add(aclEntry(DEFAULT,USER,READ_WRITE)).add(aclEntry(DEFAULT,USER,"bruce",READ)).add(aclEntry(DEFAULT,GROUP,READ)).add(aclEntry(DEFAULT,MASK,READ)).add(aclEntry(DEFAULT,OTHER,READ)).build(); assertEquals(expected,replaceAclEntries(existing,aclSpec)); }

EqualityVerifier 
/**
 * Replacing with a spec lacking a mask entry must calculate the access mask
 * from the new group-class entries (READ_WRITE).
 */
@Test
public void testReplaceAclEntriesAccessMaskCalculated() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ));
  List spec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, USER, "diana", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, USER, "bruce", READ),
      aclEntry(ACCESS, USER, "diana", READ_WRITE),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, MASK, READ_WRITE),
      aclEntry(ACCESS, OTHER, READ));
  assertEquals(want, replaceAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Merging default entries without a default user entry must synthesize one
 * by copying the access user entry's permissions (ALL).
 */
@Test
public void testMergeAclEntriesAutomaticDefaultUser() throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ));
  List spec = Lists.newArrayList(
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, READ));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
      aclEntry(DEFAULT, OTHER, READ));
  assertEquals(want, mergeAclEntries(initial, spec));
}

EqualityVerifier 
/**
 * Removing the unnamed default group entry must regenerate it automatically
 * by copying the access group entry's permissions (READ).
 */
@Test
public void testFilterAclEntriesByAclSpecAutomaticDefaultGroup()
    throws AclException {
  List initial = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, READ_WRITE),
      aclEntry(DEFAULT, GROUP, READ_WRITE),
      aclEntry(DEFAULT, OTHER, NONE));
  List toRemove = Lists.newArrayList(
      aclEntry(DEFAULT, GROUP));
  List want = ImmutableList.of(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, READ_WRITE),
      aclEntry(DEFAULT, GROUP, READ),
      aclEntry(DEFAULT, OTHER, NONE));
  assertEquals(want, filterAclEntriesByAclSpec(initial, toRemove));
}

EqualityVerifier 
/**
 * Merging default user/group entries into an ACL with no default entries
 * must auto-generate the unnamed default other entry (NONE here, matching
 * the access other entry).
 */
@Test
public void testMergeAclEntriesAutomaticDefaultOther() throws AclException {
  // FIX: restored the element type parameters that were stripped to raw types.
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  // Spec has default user and group, but no default other.
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(DEFAULT, USER, READ_EXECUTE),
      aclEntry(DEFAULT, GROUP, READ_EXECUTE));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .add(aclEntry(DEFAULT, USER, READ_EXECUTE))
      .add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
      .add(aclEntry(DEFAULT, OTHER, NONE))
      .build();
  assertEquals(expected, mergeAclEntries(existing, aclSpec));
}

EqualityVerifier 
/**
 * Replacing with named default users and no default mask must calculate the
 * default mask; the expected list shows DEFAULT MASK ALL inserted before
 * DEFAULT OTHER.
 */
@Test
public void testReplaceAclEntriesDefaultMaskCalculated() throws AclException {
  // FIX: restored the element type parameters that were stripped to raw types.
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList(
      aclEntry(ACCESS, USER, ALL),
      aclEntry(ACCESS, GROUP, READ),
      aclEntry(ACCESS, OTHER, READ),
      aclEntry(DEFAULT, USER, ALL),
      aclEntry(DEFAULT, USER, "bruce", READ),
      aclEntry(DEFAULT, USER, "diana", READ_WRITE),
      aclEntry(DEFAULT, GROUP, ALL),
      aclEntry(DEFAULT, OTHER, READ));
  List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ))
      .add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
      .add(aclEntry(DEFAULT, GROUP, ALL))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, READ))
      .build();
  assertEquals(expected, replaceAclEntries(existing, aclSpec));
}

EqualityVerifier 
/**
 * Filtering with an empty ACL spec must return the ACL unchanged.
 */
@Test
public void testFilterAclEntriesByAclSpecEmptyAclSpec() throws AclException {
  // FIX: restored the element type parameters that were stripped to raw types.
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
      .add(aclEntry(ACCESS, GROUP, READ))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, READ))
      .add(aclEntry(DEFAULT, USER, ALL))
      .add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
      .add(aclEntry(DEFAULT, GROUP, READ))
      .add(aclEntry(DEFAULT, MASK, ALL))
      .add(aclEntry(DEFAULT, OTHER, READ))
      .build();
  List<AclEntry> aclSpec = Lists.newArrayList();
  assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec));
}

EqualityVerifier 
/**
 * filterDefaultAclEntries on an ACL that contains only access entries must
 * return it unchanged.
 */
@Test
public void testFilterDefaultAclEntriesUnchanged() throws AclException {
  // FIX: restored the element type parameters that were stripped to raw types.
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", ALL))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "sales", ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  assertEquals(existing, filterDefaultAclEntries(existing));
}

Class: org.apache.hadoop.hdfs.server.namenode.TestAddBlock

InternalCallVerifier EqualityVerifier 
/**
 * Test adding new blocks. Restart the NameNode in the test to make sure the
 * AddBlockOp in the editlog is applied correctly.
 */
@Test
public void testAddBlock() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  final Path file2 = new Path("/file2");
  final Path file3 = new Path("/file3");
  final Path file4 = new Path("/file4");
  // File sizes straddle block boundaries: just under, exactly at,
  // just under two blocks, and exactly two blocks.
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  DFSTestUtil.createFile(fs, file2, BLOCKSIZE, REPLICATION, 0L);
  DFSTestUtil.createFile(fs, file3, BLOCKSIZE * 2 - 1, REPLICATION, 0L);
  DFSTestUtil.createFile(fs, file4, BLOCKSIZE * 2, REPLICATION, 0L);
  // Restart so the AddBlockOp entries must be replayed from the edit log.
  cluster.restartNameNode(true);
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  // file1: one block of BLOCKSIZE - 1 bytes, complete.
  INodeFile inode1 = fsdir.getINode4Write(file1.toString()).asFile();
  BlockInfo[] blocks1 = inode1.getBlocks();
  assertEquals(1, blocks1.length);
  assertEquals(BLOCKSIZE - 1, blocks1[0].getNumBytes());
  assertEquals(BlockUCState.COMPLETE, blocks1[0].getBlockUCState());
  // file2: one full block, complete.
  INodeFile inode2 = fsdir.getINode4Write(file2.toString()).asFile();
  BlockInfo[] blocks2 = inode2.getBlocks();
  assertEquals(1, blocks2.length);
  assertEquals(BLOCKSIZE, blocks2[0].getNumBytes());
  assertEquals(BlockUCState.COMPLETE, blocks2[0].getBlockUCState());
  // file3: one full block plus a partial second block, both complete.
  INodeFile inode3 = fsdir.getINode4Write(file3.toString()).asFile();
  BlockInfo[] blocks3 = inode3.getBlocks();
  assertEquals(2, blocks3.length);
  assertEquals(BLOCKSIZE, blocks3[0].getNumBytes());
  assertEquals(BlockUCState.COMPLETE, blocks3[0].getBlockUCState());
  assertEquals(BLOCKSIZE - 1, blocks3[1].getNumBytes());
  assertEquals(BlockUCState.COMPLETE, blocks3[1].getBlockUCState());
  // file4: two full blocks, both complete.
  INodeFile inode4 = fsdir.getINode4Write(file4.toString()).asFile();
  BlockInfo[] blocks4 = inode4.getBlocks();
  assertEquals(2, blocks4.length);
  assertEquals(BLOCKSIZE, blocks4[0].getNumBytes());
  assertEquals(BlockUCState.COMPLETE, blocks4[0].getBlockUCState());
  assertEquals(BLOCKSIZE, blocks4[1].getNumBytes());
  assertEquals(BlockUCState.COMPLETE, blocks4[1].getBlockUCState());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test adding new blocks without closing the corresponding file: after an
 * hsync with UPDATE_LENGTH and a NameNode restart, the appended block must
 * still be under construction.
 */
@Test
public void testAddBlockUC() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path file1 = new Path("/file1");
  DFSTestUtil.createFile(fs, file1, BLOCKSIZE - 1, REPLICATION, 0L);
  FSDataOutputStream out = null;
  try {
    // Append without closing, then hsync so the NameNode records the length.
    out = fs.append(file1);
    final String appendContent = "appending-content";
    out.writeBytes(appendContent);
    ((DFSOutputStream) out.getWrappedStream())
        .hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // Restart to force a replay of the edit log while the file is open.
    cluster.restartNameNode(true);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file1.toString()).asFile();
    BlockInfo[] fileBlocks = fileNode.getBlocks();
    assertEquals(2, fileBlocks.length);
    assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
    assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
    // Last block holds the remainder of the appended bytes and is still UC.
    assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
    assertEquals(BlockUCState.UNDER_CONSTRUCTION, fileBlocks[1].getBlockUCState());
  } finally {
    if (out != null) {
      out.close();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestAddBlockRetry

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After a NameNode restart, a retried addBlock() for the same file must
 * return the same block, and both responses must carry locations.
 */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // FIX: raw EnumSetWritable replaced with the typed form.
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertTrue("Block locations should be present", lb1.getLocations().length > 0);
  // Restart and retry the same addBlock.
  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}

BranchVerifier InternalCallVerifier EqualityVerifier 
/** * Retry addBlock() while another thread is in chooseTarget(). * See HDFS-4452. */ @Test public void testRetryAddBlockWhileInChooseTarget() throws Exception { final String src="/testRetryAddBlockWhileInChooseTarget"; FSNamesystem ns=cluster.getNamesystem(); BlockManager spyBM=spy(ns.getBlockManager()); final NamenodeProtocols nn=cluster.getNameNodeRpc(); Class nsClass=ns.getClass(); Field bmField=nsClass.getDeclaredField("blockManager"); bmField.setAccessible(true); bmField.set(ns,spyBM); doAnswer(new Answer(){ @Override public DatanodeStorageInfo[] answer( InvocationOnMock invocation) throws Throwable { LOG.info("chooseTarget for " + src); DatanodeStorageInfo[] ret=(DatanodeStorageInfo[])invocation.callRealMethod(); count++; if (count == 1) { LOG.info("Starting second addBlock for " + src); nn.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null); LocatedBlocks lbs=nn.getBlockLocations(src,0,Long.MAX_VALUE); assertEquals("Must be one block",1,lbs.getLocatedBlocks().size()); lb2=lbs.get(0); assertEquals("Wrong replication",REPLICATION,lb2.getLocations().length); } return ret; } } ).when(spyBM).chooseTarget(Mockito.anyString(),Mockito.anyInt(),Mockito.any(),Mockito.>any(),Mockito.anyLong(),Mockito.>any()); nn.create(src,FsPermission.getFileDefault(),"clientName",new EnumSetWritable(EnumSet.of(CreateFlag.CREATE)),true,(short)3,1024,null); LOG.info("Starting first addBlock for " + src); nn.addBlock(src,"clientName",null,null,INodeId.GRANDFATHER_INODE_ID,null); LocatedBlocks lbs=nn.getBlockLocations(src,0,Long.MAX_VALUE); assertEquals("Must be one block",1,lbs.getLocatedBlocks().size()); lb1=lbs.get(0); assertEquals("Wrong replication",REPLICATION,lb1.getLocations().length); assertEquals("Blocks are not equal",lb1.getBlock(),lb2.getBlock()); }

Class: org.apache.hadoop.hdfs.server.namenode.TestAuditLogger

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    FileSystem fs = cluster.getFileSystem();
    long now = System.currentTimeMillis();
    final Path root = new Path("/");
    fs.setTimes(root, now, now);
    fs.setPermission(root, new FsPermission(TEST_PERMISSION));
    // The logger must have seen the updated permission and both operations.
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a custom AuditLogger is initialized and receives exactly one
 * event for a single setTimes() operation.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();
    FileSystem fs = cluster.getFileSystem();
    long now = System.currentTimeMillis();
    fs.setTimes(new Path("/"), now, now);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify audit logging of WebHDFS requests: the logged remote address is the
 * direct peer (127.0.0.1) unless that peer is a configured proxy server, in
 * which case the X-Forwarded-For address is logged instead.
 */
@Test
public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    URI uri = new URI("http",
        NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
        "/webhdfs/v1/", op.toQueryString(), null);
    // Plain request: remote address is the direct peer.
    HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.connect();
    assertEquals(200, conn.getResponseCode());
    conn.disconnect();
    assertEquals(1, DummyAuditLogger.logCount);
    assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
    // X-Forwarded-For from a non-proxy peer is ignored.
    conn = (HttpURLConnection) uri.toURL().openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
    conn.connect();
    assertEquals(200, conn.getResponseCode());
    conn.disconnect();
    assertEquals(2, DummyAuditLogger.logCount);
    assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
    // After registering 127.0.0.1 as a proxy server, the forwarded
    // address is logged.
    conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    conn = (HttpURLConnection) uri.toURL().openConnection();
    conn.setRequestMethod(op.getType().toString());
    conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
    conn.connect();
    assertEquals(200, conn.getResponseCode());
    conn.disconnect();
    assertEquals(3, DummyAuditLogger.logCount);
    assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestAuditLogs

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Spins up a 4-datanode mini cluster with audit logging configured (sync or
 * async per {@code useAsyncLog}), creates the test files, and sanity-checks
 * that exactly one audit appender is installed of the expected kind.
 */
@Before
public void setupCluster() throws Exception {
  // Configure audit logs before the namesystem is instantiated.
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed")
      .setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);
  // Sanity check: exactly one appender; async iff requested.
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  // FIX: restored the element type stripped to a raw List.
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);
  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short) 3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestBackupNode

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Ensure that the backupnode will tail edits from the NN * and keep in sync, even while the NN rolls, checkpoints * occur, etc. */ @Test public void testBackupNodeTailsEdits() throws Exception { Configuration conf=new HdfsConfiguration(); HAUtil.setAllowStandbyReads(conf,true); MiniDFSCluster cluster=null; FileSystem fileSys=null; BackupNode backup=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); fileSys=cluster.getFileSystem(); backup=startBackupNode(conf,StartupOption.BACKUP,1); BackupImage bnImage=(BackupImage)backup.getFSImage(); testBNInSync(cluster,backup,1); NameNode nn=cluster.getNameNode(); NamenodeProtocols nnRpc=nn.getRpcServer(); nnRpc.rollEditLog(); assertEquals(bnImage.getEditLog().getCurSegmentTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId()); testBNInSync(cluster,backup,2); long nnImageBefore=nn.getFSImage().getStorage().getMostRecentCheckpointTxId(); backup.doCheckpoint(); long nnImageAfter=nn.getFSImage().getStorage().getMostRecentCheckpointTxId(); assertTrue("nn should have received new checkpoint. 
before: " + nnImageBefore + " after: "+ nnImageAfter,nnImageAfter > nnImageBefore); testBNInSync(cluster,backup,3); StorageDirectory sd=bnImage.getStorage().getStorageDir(0); backup.stop(); backup=null; EditLogFile editsLog=FSImageTestUtil.findLatestEditsLog(sd); assertEquals(editsLog.getFirstTxId(),nn.getFSImage().getEditLog().getCurSegmentTxId()); assertTrue("Should not have finalized " + editsLog,editsLog.isInProgress()); assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down"))); backup=startBackupNode(conf,StartupOption.BACKUP,1); testBNInSync(cluster,backup,4); assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down",false)); backup.stop(false); assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down-2"))); } finally { LOG.info("Shutting down..."); if (backup != null) backup.stop(); if (fileSys != null) fileSys.close(); if (cluster != null) cluster.shutdown(); } assertStorageDirsMatch(cluster.getNameNode(),backup); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify that a file can be read both from NameNode and BackupNode.
 */
@Test
public void testCanReadData() throws IOException {
  Path file1 = new Path("/fileToRead.dat");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;
  try {
    // Start a NameNode, a BackupNode, and wait for an initial checkpoint.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0).format(true).build();
    fileSys = cluster.getFileSystem();
    long txid = cluster.getNameNodeRpc().getTransactionID();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    waitCheckpointDone(cluster, txid);
    // Configure a two-namenode nameservice so DataNodes report to both.
    // FIX: renamed misspelled local ("Preffix") and removed a no-op
    // conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) whose result was discarded.
    String rpcAddrKeyPrefix =
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster";
    String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString();
    String bnAddr = backup.getNameNodeAddressHostPortString();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster");
    conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster",
        "nnActive, nnBackup");
    conf.set(rpcAddrKeyPrefix + ".nnActive", nnAddr);
    conf.set(rpcAddrKeyPrefix + ".nnBackup", bnAddr);
    cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null);
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
        (short) 3, seed);
    // Read the file through both namenodes; contents must be identical.
    FileSystem bnFS = FileSystem.get(new Path("hdfs://" + bnAddr).toUri(), conf);
    String nnData = DFSTestUtil.readFile(fileSys, file1);
    String bnData = DFSTestUtil.readFile(bnFS, file1);
    assertEquals("Data read from BackupNode and NameNode is not the same.",
        nnData, bnData);
  } catch (IOException e) {
    LOG.error("Error in TestBackupNode: ", e);
    // FIX: assertTrue(msg, false) replaced with the idiomatic fail(msg).
    fail(e.getLocalizedMessage());
  } finally {
    if (fileSys != null) fileSys.close();
    if (backup != null) backup.stop();
    if (cluster != null) cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestBlockUnderConstruction

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
  // Write half a block to start.
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);
  for (int i = 1; i < NUM_BLOCKS; ) {
    // While the file is open, the last reported block must be
    // under construction.
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    // FIX: restored the element type; the raw List made get(..) return
    // Object, breaking the getBlock() call below.
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoUnderConstruction);
    if (++i < NUM_BLOCKS) {
      // Write one more full block before the next check.
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  out.close();
}

Class: org.apache.hadoop.hdfs.server.namenode.TestCacheDirectives

InternalCallVerifier EqualityVerifier 
/**
 * End-to-end caching flow: wait for datanode cache capacity to register,
 * send a bogus cache report (which the NameNode should tolerate), then add
 * and remove cache directives while verifying per-datanode cache stats.
 */
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  // Wait until every datanode's cache capacity is registered and unused.
  // FIX: restored type parameters stripped to raw types throughout.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
          (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);
  // Report a nonexistent block id; the NameNode must tolerate it.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool and some test files.
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i = 0; i < numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache each path and wait for the expected block counts.
  int expected = 0;
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path(paths.get(i)))
        .setPool(pool)
        .build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }
  // Verify per-datanode cache accounting is internally consistent.
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE, totalUsed);
  // Remove each directive and watch the counts drain back down.
  RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null);
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Create a cache pool, verify its listing, modify every mutable attribute,
 * verify again, then remove it and check that removal of a non-existent
 * pool fails.
 */
@Test(timeout=60000)
public void testCreateAndModifyPools() throws Exception {
  String poolName = "pool1";
  String ownerName = "abc";
  String groupName = "123";
  FsPermission mode = new FsPermission((short) 0755);
  long limit = 150;
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setOwnerName(ownerName).setGroupName(groupName)
      .setMode(mode).setLimit(limit));
  // FIX: restored the iterator's type parameter; the raw RemoteIterator
  // made next() return Object and broke the getInfo() call.
  RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
  CachePoolInfo info = iter.next().getInfo();
  assertEquals(poolName, info.getPoolName());
  assertEquals(ownerName, info.getOwnerName());
  assertEquals(groupName, info.getGroupName());
  // Modify and verify the listing reflects the changes.
  ownerName = "def";
  groupName = "456";
  mode = new FsPermission((short) 0700);
  limit = 151;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(ownerName).setGroupName(groupName)
      .setMode(mode).setLimit(limit));
  iter = dfs.listCachePools();
  info = iter.next().getInfo();
  assertEquals(poolName, info.getPoolName());
  assertEquals(ownerName, info.getOwnerName());
  assertEquals(groupName, info.getGroupName());
  assertEquals(mode, info.getMode());
  assertEquals(limit, (long) info.getLimit());
  dfs.removeCachePool(poolName);
  iter = dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool", iter.hasNext());
  proto.listCachePools(null);
  // Removing a never-existent pool must fail ...
  try {
    proto.removeCachePool("pool99");
    fail("expected to get an exception when " +
        "removing a non-existent pool.");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
  }
  // ... and so must removing the pool that was just deleted.
  try {
    proto.removeCachePool(poolName);
    fail("expected to get an exception when " +
        "removing a non-existent pool.");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Cannot remove non-existent", ioe);
  }
  iter = dfs.listCachePools();
  assertFalse("expected no cache pools after deleting pool", iter.hasNext());
}

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testMaxRelativeExpiry() throws Exception { try { dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l)); fail("Added a pool with a negative max expiry."); } catch ( InvalidRequestException e) { GenericTestUtils.assertExceptionContains("negative",e); } try { dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1)); fail("Added a pool with too big of a max expiry."); } catch ( InvalidRequestException e) { GenericTestUtils.assertExceptionContains("too big",e); } CachePoolInfo coolPool=new CachePoolInfo("coolPool"); final long poolExpiration=1000 * 60 * 10l; dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration)); RemoteIterator poolIt=dfs.listCachePools(); CachePoolInfo listPool=poolIt.next().getInfo(); assertFalse("Should only be one pool",poolIt.hasNext()); assertEquals("Expected max relative expiry to match set value",poolExpiration,listPool.getMaxRelativeExpiryMs().longValue()); try { dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l)); fail("Added a pool with a negative max expiry."); } catch ( InvalidRequestException e) { assertExceptionContains("negative",e); } try { dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1)); fail("Added a pool with too big of a max expiry."); } catch ( InvalidRequestException e) { assertExceptionContains("too big",e); } CacheDirectiveInfo defaultExpiry=new CacheDirectiveInfo.Builder().setPath(new Path("/blah")).setPool(coolPool.getPoolName()).build(); dfs.addCacheDirective(defaultExpiry); RemoteIterator dirIt=dfs.listCacheDirectives(defaultExpiry); CacheDirectiveInfo listInfo=dirIt.next().getInfo(); assertFalse("Should only have one entry in listing",dirIt.hasNext()); long listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime(); assertTrue("Directive expiry should be approximately the pool's max expiry",Math.abs(listExpiration - poolExpiration) < 10 * 1000); 
CacheDirectiveInfo.Builder builder=new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName()); try { dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build()); fail("Added a directive that exceeds pool's max relative expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("exceeds the max relative expiration",e); } try { dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build()); fail("Added a directive that exceeds pool's max relative expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("exceeds the max relative expiration",e); } try { dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newRelative(poolExpiration + 1)).build()); fail("Modified a directive to exceed pool's max relative expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("exceeds the max relative expiration",e); } try { dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build()); fail("Modified a directive to exceed pool's max relative expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("exceeds the max relative expiration",e); } try { dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build()); fail("Added a directive with a gigantic max value"); } catch ( IllegalArgumentException e) { assertExceptionContains("is too far in the future",e); } try { dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build()); fail("Added a directive with a gigantic max value"); } catch ( InvalidRequestException e) { assertExceptionContains("is too far in the future",e); } try { dfs.modifyCacheDirective(new 
CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.NEVER).build()); fail("Modified a directive to exceed pool's max relative expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("exceeds the max relative expiration",e); } try { dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build()); fail("Modified a directive to exceed pool's max relative expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("is too far in the future",e); } CachePoolInfo destPool=new CachePoolInfo("destPool"); dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2)); try { dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).build()); fail("Modified a directive to a pool with a lower max expiration"); } catch ( InvalidRequestException e) { assertExceptionContains("exceeds the max relative expiration",e); } dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry).setId(listInfo.getId()).setPool(destPool.getPoolName()).setExpiration(Expiration.newRelative(poolExpiration / 2)).build()); dirIt=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build()); listInfo=dirIt.next().getInfo(); listExpiration=listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime(); assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately "+ poolExpiration / 2,Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000); dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER)); poolIt=dfs.listCachePools(); listPool=poolIt.next().getInfo(); while (!listPool.getPoolName().equals(destPool.getPoolName())) { listPool=poolIt.next().getInfo(); } assertEquals("Expected max relative expiry to match set 
value",CachePoolInfo.RELATIVE_EXPIRY_NEVER,listPool.getMaxRelativeExpiryMs().longValue()); dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER)).build()); dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(listInfo.getId()).setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1)).build()); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises pool byte limits: negative limits are rejected, directives over
 * the limit are rejected, lowering the limit drops cached bytes and reports
 * overlimit stats, and CacheFlag.FORCE bypasses the capacity check.
 */
@Test(timeout=120000)
public void testLimit() throws Exception {
  // A negative limit must be rejected.
  try {
    dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
    fail("Should not be able to set a negative limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  final String destiny = "poolofdestiny";
  final Path path1 = new Path("/destiny");
  DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
  // Pool limit one byte short of the file size: caching must fail.
  final CachePoolInfo poolInfo = new CachePoolInfo(destiny)
      .setLimit(2 * BLOCK_SIZE - 1);
  dfs.addCachePool(poolInfo);
  final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
      .setPool(destiny).setPath(path1).build();
  try {
    dfs.addCacheDirective(info1);
    fail("Should not be able to cache when there is no more limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Raise the limit to fit; the directive is accepted.
  poolInfo.setLimit(2 * BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  long id1 = dfs.addCacheDirective(info1);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1,
      poolInfo, "testLimit:1");
  // A second file would exceed the limit.
  final Path path2 = new Path("/failure");
  DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
  try {
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPool(destiny).setPath(path2).build(),
        EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to add another cached file");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Lowering the limit drops cached bytes and reports overlimit.
  poolInfo.setLimit(BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0,
      poolInfo, "testLimit:2");
  // FIX: restored the iterator's type parameter; the raw RemoteIterator
  // made next() return Object and broke the getStats() call.
  RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
  assertTrue("Expected a cache pool", it.hasNext());
  CachePoolStats stats = it.next().getStats();
  assertEquals("Overlimit bytes should be difference of needed and limit",
      BLOCK_SIZE, stats.getBytesOverlimit());
  // Moving the directive into an undersized pool without FORCE is
  // expected to throw.
  // NOTE(review): there is no fail() after this modify call, so the test
  // passes whether or not the exception fires — confirm this leniency is
  // intentional against the project's history.
  CachePoolInfo inadequate =
      new CachePoolInfo("poolofinadequacy").setLimit(BLOCK_SIZE);
  dfs.addCachePool(inadequate);
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
            .setId(id1).setPool(inadequate.getPoolName()).build(),
        EnumSet.noneOf(CacheFlag.class));
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // With FORCE, both the move and a new directive succeed despite the limit.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
          .setId(id1).setPool(inadequate.getPoolName()).build(),
      EnumSet.of(CacheFlag.FORCE));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
          .setPool(inadequate.getPoolName()).setPath(path1).build(),
      EnumSet.of(CacheFlag.FORCE));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testCacheManagerRestart() throws Exception { SecondaryNameNode secondary=null; try { conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,"0.0.0.0:0"); secondary=new SecondaryNameNode(conf); final String pool="poolparty"; String groupName="partygroup"; FsPermission mode=new FsPermission((short)0777); long limit=747; dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit)); RemoteIterator pit=dfs.listCachePools(); assertTrue("No cache pools found",pit.hasNext()); CachePoolInfo info=pit.next().getInfo(); assertEquals(pool,info.getPoolName()); assertEquals(groupName,info.getGroupName()); assertEquals(mode,info.getMode()); assertEquals(limit,(long)info.getLimit()); assertFalse("Unexpected # of cache pools found",pit.hasNext()); int numEntries=10; String entryPrefix="/party-"; long prevId=-1; final Date expiry=new Date(); for (int i=0; i < numEntries; i++) { prevId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build()); } RemoteIterator dit=dfs.listCacheDirectives(null); for (int i=0; i < numEntries; i++) { assertTrue("Unexpected # of cache entries: " + i,dit.hasNext()); CacheDirectiveInfo cd=dit.next().getInfo(); assertEquals(i + 1,cd.getId().longValue()); assertEquals(entryPrefix + i,cd.getPath().toUri().getPath()); assertEquals(pool,cd.getPool()); } assertFalse("Unexpected # of cache directives found",dit.hasNext()); secondary.doCheckpoint(); final String imagePool="imagePool"; dfs.addCachePool(new CachePoolInfo(imagePool)); prevId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build()); dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); dfs.saveNamespace(); dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); boolean fetchImage=secondary.doCheckpoint(); assertTrue("Secondary should have fetched a new fsimage 
from NameNode",fetchImage); dfs.removeCachePool(imagePool); cluster.restartNameNode(); pit=dfs.listCachePools(); assertTrue("No cache pools found",pit.hasNext()); info=pit.next().getInfo(); assertEquals(pool,info.getPoolName()); assertEquals(pool,info.getPoolName()); assertEquals(groupName,info.getGroupName()); assertEquals(mode,info.getMode()); assertEquals(limit,(long)info.getLimit()); assertFalse("Unexpected # of cache pools found",pit.hasNext()); dit=dfs.listCacheDirectives(null); for (int i=0; i < numEntries; i++) { assertTrue("Unexpected # of cache entries: " + i,dit.hasNext()); CacheDirectiveInfo cd=dit.next().getInfo(); assertEquals(i + 1,cd.getId().longValue()); assertEquals(entryPrefix + i,cd.getPath().toUri().getPath()); assertEquals(pool,cd.getPool()); assertEquals(expiry.getTime(),cd.getExpiration().getMillis()); } assertFalse("Unexpected # of cache directives found",dit.hasNext()); long nextId=dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build()); assertEquals(prevId + 1,nextId); } finally { if (secondary != null) { secondary.shutdown(); } } }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the full add/list/modify/remove lifecycle of cache directives,
 * including permission failures, malformed inputs, and filtered listings.
 * Uses both the privileged handles ("dfs"/"proto") and an unprivileged user
 * (via addAsUnprivileged) to cover access-control paths.
 */
@Test(timeout=60000) public void testAddRemoveDirectives() throws Exception {
  // Pools 1-3 are open to everyone (0777); pool4 is mode 0 (nobody may use it).
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short)0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short)0)));
  CacheDirectiveInfo alpha=new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
  CacheDirectiveInfo beta=new CacheDirectiveInfo.Builder().setPath(new Path("/beta")).setPool("pool2").build();
  CacheDirectiveInfo delta=new CacheDirectiveInfo.Builder().setPath(new Path("/delta")).setPool("pool1").build();
  // Re-adding identical directive info must yield a distinct directive ID.
  long alphaId=addAsUnprivileged(alpha);
  long alphaId2=addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an " + "existing CacheDirectiveInfo",alphaId == alphaId2);
  long betaId=addAsUnprivileged(beta);
  // Unknown pool name -> InvalidRequestException.
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/unicorn")).setPool("no_such_pool").build());
    fail("expected an error when adding to a non-existent pool.");
  } catch ( InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool",ioe);
  }
  // Pool with mode 0 -> AccessControlException for the unprivileged user.
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/blackhole")).setPool("pool4").build());
    fail("expected an error when adding to a pool with " + "mode 0 (no permissions for anyone).");
  } catch ( AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool",e);
  }
  // Malformed DFS path -> IllegalArgumentException.
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/illegal:path/")).setPool("pool1").build());
    fail("expected an error when adding a malformed path " + "to the cache directives.");
  } catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename",e);
  }
  // Empty pool name -> InvalidRequestException.
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("/emptypoolname")).setReplication((short)1).setPool("").build());
    fail("expected an error when adding a cache " + "directive with an empty pool name.");
  } catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name",e);
  }
  long deltaId=addAsUnprivileged(delta);
  // A relative path is accepted as well.
  long relativeId=addAsUnprivileged(new CacheDirectiveInfo.Builder().setPath(new Path("relative")).setPool("pool1").build());
  // Listings: unfiltered, filtered by pool, and filtered by ID.
  RemoteIterator iter;
  iter=dfs.listCacheDirectives(null);
  validateListAll(iter,alphaId,alphaId2,betaId,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter,alphaId,alphaId2,deltaId,relativeId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter,betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter,alphaId2);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter,relativeId);
  // A removed ID disappears from listings and cannot be removed twice.
  dfs.removeCacheDirective(betaId);
  iter=dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  } catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  // Negative and unknown IDs are rejected at the protocol level too.
  try {
    proto.removeCacheDirective(-42l);
    fail("expected an error when removing a negative ID");
  } catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID",e);
  }
  try {
    proto.removeCacheDirective(43l);
    fail("expected an error when removing a non-existent ID");
  } catch ( InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID",e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  // Modify: bump replication on the remaining directive and verify it took.
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(relativeId).setReplication((short)555).build());
  iter=dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified=iter.next().getInfo();
  assertEquals(relativeId,modified.getId().longValue());
  assertEquals((short)555,modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter=dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // A "." path round-trips through add/modify/remove as well.
  CacheDirectiveInfo directive=new CacheDirectiveInfo.Builder().setPath(new Path(".")).setPool("pool1").build();
  long id=dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive).setId(id).setReplication((short)2).build());
  dfs.removeCacheDirective(id);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testListCachePoolPermissions() throws Exception { final UserGroupInformation myUser=UserGroupInformation.createRemoteUser("myuser"); final DistributedFileSystem myDfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser,conf); final String poolName="poolparty"; dfs.addCachePool(new CachePoolInfo(poolName).setMode(new FsPermission((short)0700))); RemoteIterator it=myDfs.listCachePools(); CachePoolInfo info=it.next().getInfo(); assertFalse(it.hasNext()); assertEquals("Expected pool name",poolName,info.getPoolName()); assertNull("Unexpected owner name",info.getOwnerName()); assertNull("Unexpected group name",info.getGroupName()); assertNull("Unexpected mode",info.getMode()); assertNull("Unexpected limit",info.getLimit()); final long limit=99; dfs.modifyCachePool(new CachePoolInfo(poolName).setOwnerName(myUser.getShortUserName()).setLimit(limit)); it=myDfs.listCachePools(); info=it.next().getInfo(); assertFalse(it.hasNext()); assertEquals("Expected pool name",poolName,info.getPoolName()); assertEquals("Mismatched owner name",myUser.getShortUserName(),info.getOwnerName()); assertNotNull("Expected group name",info.getGroupName()); assertEquals("Mismatched mode",(short)0700,info.getMode().toShort()); assertEquals("Mismatched limit",limit,(long)info.getLimit()); }

Class: org.apache.hadoop.hdfs.server.namenode.TestCheckPointForSecurityTokens

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that "-saveNamespace" rolls the in-progress edit log, and that
 * delegation tokens issued before the save survive multiple cluster
 * restarts (remaining renewable and cancellable throughout).
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String renewer = UserGroupInformation.getLoginUser().getUserName();
    Token token1 = namesystem.getDelegationToken(new Text(renewer));
    Token token2 = namesystem.getDelegationToken(new Text(renewer));

    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[]{"-saveNamespace"};

    // Each storage dir's in-progress log should hold the START txn plus the
    // transactions generated by the two getDelegationToken calls above.
    NameNode nn = cluster.getNameNode();
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      // FIX: removed a stray empty statement (";") that followed this
      // assertion in the original.
      assertEquals("In-progress log " + log + " should have 5 transactions",
          5, numTransactions);
    }

    // Saving namespace rolls the edit log; afterwards the fresh in-progress
    // log should only contain the START txn.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      // FIX: chain the original exception instead of flattening it to its
      // message, so the root cause is preserved in the test log.
      throw new IOException(e);
    }
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.validateLog();
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn",
          1, numTransactions);
    }

    // First restart: the pre-save tokens must still be renewable.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    try {
      renewToken(token1);
      renewToken(token2);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
    namesystem = cluster.getNamesystem();
    Token token3 = namesystem.getDelegationToken(new Text(renewer));
    Token token4 = namesystem.getDelegationToken(new Text(renewer));

    // Second restart: issue one more token, then renew all five.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    Token token5 = namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }

    // Third restart: every token must still be renewable and cancellable.
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestCheckpoint

InternalCallVerifier EqualityVerifier 
/**
 * Test case where two secondary namenodes are checkpointing the same
 * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}
 * since that test runs against two distinct NNs.
 *
 * This case tests the following interleaving:
 * - 2NN A) calls rollEdits()
 * - 2NN B) calls rollEdits()
 * - 2NN A) paused at getRemoteEditLogManifest()
 * - 2NN B) calls getRemoteEditLogManifest() (returns up to txid 4)
 * - 2NN B) uploads checkpoint fsimage_4
 * - 2NN A) allowed to proceed, also returns up to txid 4
 * - 2NN A) uploads checkpoint fsimage_4 as well, should fail gracefully
 *
 * It verifies that one of the two gets an error that it's uploading a
 * duplicate checkpoint, and the other one succeeds.
 */
@Test public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  SecondaryNameNode secondary1=null, secondary2=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    // Start the two competing secondaries.
    secondary1=startSecondaryNameNode(conf,1);
    secondary2=startSecondaryNameNode(conf,2);
    // Replace 2NN A's NN proxy with a mock whose getEditLogManifest() can
    // be held at a barrier; every other call passes straight through to the
    // real protocol via the delegating Answer.
    final NamenodeProtocol origNN=secondary1.getNameNode();
    final Answer delegator=new GenericTestUtils.DelegateAnswer(origNN);
    NamenodeProtocol spyNN=Mockito.mock(NamenodeProtocol.class,delegator);
    DelayAnswer delayer=new DelayAnswer(LOG){
      @Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
        // When released, forward to the real NN as if nothing happened.
        return delegator.answer(invocation);
      }
    };
    secondary1.setNameNode(spyNN);
    Mockito.doAnswer(delayer).when(spyNN).getEditLogManifest(Mockito.anyLong());
    // Kick off a checkpoint in 2NN A on a background thread...
    DoCheckpointThread checkpointThread=new DoCheckpointThread(secondary1);
    checkpointThread.start();
    // ...and wait until it blocks inside getEditLogManifest().
    delayer.waitForCall();
    // 2NN B checkpoints while A is paused; the NN now has fsimage_4.
    secondary2.doCheckpoint();
    NNStorage storage=cluster.getNameNode().getFSImage().getStorage();
    assertEquals(4,storage.getMostRecentCheckpointTxId());
    // Let A finish: its duplicate fsimage_4 upload must fail gracefully and
    // leave the NN's most recent checkpoint unchanged.
    delayer.proceed();
    checkpointThread.join();
    checkpointThread.propagateExceptions();
    assertEquals(4,storage.getMostRecentCheckpointTxId());
    // B can still checkpoint normally afterwards.
    secondary2.doCheckpoint();
    assertEquals(6,storage.getMostRecentCheckpointTxId());
    assertNNHasCheckpoints(cluster,ImmutableList.of(4,6));
    // Restore A's real NN proxy; A must also checkpoint successfully again.
    secondary1.setNameNode(origNN);
    secondary1.doCheckpoint();
    assertEquals(8,storage.getMostRecentCheckpointTxId());
    assertParallelFilesInvariant(cluster,ImmutableList.of(secondary1,secondary2));
    assertNNHasCheckpoints(cluster,ImmutableList.of(6,8));
  } finally {
    cleanup(secondary1);
    secondary1=null;
    cleanup(secondary2);
    secondary2=null;
    cleanup(cluster);
    cluster=null;
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testCommandLineParsing() throws ParseException { SecondaryNameNode.CommandLineOpts opts=new SecondaryNameNode.CommandLineOpts(); opts.parse(); assertNull(opts.getCommand()); opts.parse("-checkpoint"); assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,opts.getCommand()); assertFalse(opts.shouldForceCheckpoint()); opts.parse("-checkpoint","force"); assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,opts.getCommand()); assertTrue(opts.shouldForceCheckpoint()); opts.parse("-geteditsize"); assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE,opts.getCommand()); opts.parse("-format"); assertTrue(opts.shouldFormat()); try { opts.parse("-geteditsize","-checkpoint"); fail("Should have failed bad parsing for two actions"); } catch ( ParseException e) { LOG.warn("Encountered ",e); } try { opts.parse("-checkpoint","xx"); fail("Should have failed for bad checkpoint arg"); } catch ( ParseException e) { LOG.warn("Encountered ",e); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The NN is configured with a name-only dir and an edits-only dir, with
 * storage-restore enabled. If the name-only dir disappears and later comes
 * back, a subsequent checkpoint must still function correctly.
 *
 * @throws Exception on unexpected test failure
 */
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;

  Configuration conf = new HdfsConfiguration();
  File baseDir = new File(MiniDFSCluster.getBaseDirectory());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/name-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      MiniDFSCluster.getBaseDirectory() + "/edits-only");
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(baseDir, "namesecondary1")).toString());

  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .format(true)
        .manageNameDfsDirs(false)
        .build();
    secondary = startSecondaryNameNode(conf);

    // Baseline: one successful checkpoint while all dirs are healthy.
    secondary.doCheckpoint();

    // Storage dir 0 is the image-only dir; make it inaccessible.
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory imageSd = storage.getStorageDir(0);
    assertEquals(NameNodeDirType.IMAGE, imageSd.getStorageDirType());
    currentDir = imageSd.getCurrentDir();
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

    // With no usable image dir, the checkpoint must fail.
    try {
      secondary.doCheckpoint();
      fail("Did not fail to checkpoint when there are no valid storage dirs");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "No targets in destination storage", ioe);
    }

    // Restore permissions, tell the NN to reclaim the failed dir, and roll
    // the edit log; checkpointing should then succeed again.
    assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    secondary.doCheckpoint();

    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      // Ensure the dir is writable again so later cleanup can remove it.
      FileUtil.chmod(currentDir.getAbsolutePath(), "755");
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that the secondary does not re-download the fsimage when the
 * NN's most recent checkpoint has not advanced since the last download.
 */
@Test
public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  Path dir = new Path("/checkpoint");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .format(true)
      .build();
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  FSImage image = cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary = null;
  try {
    assertFalse(fileSys.exists(dir));
    secondary = startSecondaryNameNode(conf);

    File secondaryDir =
        new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
    File secondaryCurrent = new File(secondaryDir, "current");

    // The image the 2NN will fetch first, and the image it will produce.
    long expectedTxIdToDownload = cluster.getNameNode().getFSImage()
        .getStorage().getMostRecentCheckpointTxId();
    File imgBefore = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload));
    File imgAfter = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload + 2));

    assertFalse("Secondary should start with empty current/ dir " + "but "
        + imgBefore + " exists", imgBefore.exists());

    // First checkpoint: must download the original image and write a new one.
    assertTrue("Secondary should have loaded an image",
        secondary.doCheckpoint());
    assertTrue("Secondary should have downloaded original image",
        imgBefore.exists());
    assertTrue("Secondary should have created a new image",
        imgAfter.exists());

    long fsimageLength = imgBefore.length();
    assertEquals("Image size should not have changed", fsimageLength,
        imgAfter.length());

    // Make a namespace change, then checkpoint again: the NN's image has
    // not rolled, so no re-download should be necessary.
    fileSys.mkdirs(dir);
    assertFalse("Another checkpoint should not have to re-load image",
        secondary.doCheckpoint());

    // The checkpoint uploaded to the NN should have grown past the original.
    for (StorageDirectory sd :
        image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
          expectedTxIdToDownload + 5);
      assertTrue("Image size increased",
          imageFile.length() > fsimageLength);
    }
  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test that, if the edits dir is separate from the name dir, it is * properly locked. */ @Test public void testSeparateEditsDirLocking() throws IOException { Configuration conf=new HdfsConfiguration(); File nameDir=new File(MiniDFSCluster.getBaseDirectory(),"name"); File editsDir=new File(MiniDFSCluster.getBaseDirectory(),"testSeparateEditsDirLocking"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,nameDir.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,editsDir.getAbsolutePath()); MiniDFSCluster cluster=null; StorageDirectory savedSd=null; try { cluster=new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false).numDataNodes(0).build(); NNStorage storage=cluster.getNameNode().getFSImage().getStorage(); for ( StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) { assertEquals(editsDir.getAbsoluteFile(),sd.getRoot()); assertLockFails(sd); savedSd=sd; } } finally { cleanup(cluster); cluster=null; } assertNotNull(savedSd); assertClusterStartFailsWhenDirLocked(conf,savedSd); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Starts a federated pair of namenodes plus one secondary per namenode,
 * then verifies each secondary resolves and checkpoints its own NN.
 *
 * @throws IOException on unexpected failure
 */
@Test
public void testMultipleSecondaryNamenodes() throws IOException {
  Configuration conf = new HdfsConfiguration();
  String nameserviceId1 = "ns1";
  String nameserviceId2 = "ns2";
  conf.set(DFSConfigKeys.DFS_NAMESERVICES,
      nameserviceId1 + "," + nameserviceId2);
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary1 = null;
  SecondaryNameNode secondary2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
            conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
        .build();
    Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
    Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
    InetSocketAddress nn1RpcAddress =
        cluster.getNameNode(0).getNameNodeAddress();
    InetSocketAddress nn2RpcAddress =
        cluster.getNameNode(1).getNameNodeAddress();
    String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
    String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

    // Blank out the generic service-rpc key and set only the
    // per-nameservice suffixed keys, so each 2NN resolves exactly one NN.
    snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
    snConf1.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
        nn1);
    snConf2.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
        nn2);

    secondary1 = startSecondaryNameNode(snConf1);
    secondary2 = startSecondaryNameNode(snConf2);

    // Each 2NN should have bound to its own NN's RPC port.
    assertEquals(secondary1.getNameNodeAddress().getPort(),
        nn1RpcAddress.getPort());
    assertEquals(secondary2.getNameNodeAddress().getPort(),
        nn2RpcAddress.getPort());
    assertTrue(secondary1.getNameNodeAddress().getPort()
        != secondary2.getNameNodeAddress().getPort());

    // Both must be able to checkpoint their respective namenodes.
    secondary1.doCheckpoint();
    secondary2.doCheckpoint();
  } finally {
    cleanup(secondary1);
    secondary1 = null;
    cleanup(secondary2);
    secondary2 = null;
    cleanup(cluster);
    cluster = null;
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test(timeout=30000) public void testTooManyEditReplayFailures() throws IOException { Configuration conf=new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,"1"); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,"1"); FSDataOutputStream fos=null; SecondaryNameNode secondary=null; MiniDFSCluster cluster=null; FileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).checkExitOnShutdown(false).build(); cluster.waitActive(); fs=cluster.getFileSystem(); fos=fs.create(new Path("tmpfile0")); fos.write(new byte[]{0,1,2,3}); Mockito.doThrow(new IOException("Injecting failure during merge")).when(faultInjector).duringMerge(); secondary=startSecondaryNameNode(conf); secondary.doWork(); fail("2NN did not exit."); } catch ( ExitException ee) { ExitUtil.resetFirstExitException(); assertEquals("Max retries",1,secondary.getMergeErrorCount() - 1); } finally { if (fs != null) { fs.close(); } cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; Mockito.reset(faultInjector); } }

InternalCallVerifier EqualityVerifier 
/** * Test case where two secondary namenodes are checkpointing the same * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}since that test runs against two distinct NNs. * This case tests the following interleaving: * - 2NN A downloads image (up to txid 2) * - 2NN A about to save its own checkpoint * - 2NN B downloads image (up to txid 4) * - 2NN B uploads checkpoint (txid 4) * - 2NN A uploads checkpoint (txid 2) * It verifies that this works even though the earlier-txid checkpoint gets * uploaded after the later-txid checkpoint. */ @Test public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; SecondaryNameNode secondary1=null, secondary2=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); secondary1=startSecondaryNameNode(conf,1); secondary2=startSecondaryNameNode(conf,2); CheckpointStorage spyImage1=spyOnSecondaryImage(secondary1); DelayAnswer delayer=new DelayAnswer(LOG); Mockito.doAnswer(delayer).when(spyImage1).saveFSImageInAllDirs(Mockito.any(),Mockito.anyLong()); DoCheckpointThread checkpointThread=new DoCheckpointThread(secondary1); checkpointThread.start(); delayer.waitForCall(); secondary2.doCheckpoint(); delayer.proceed(); checkpointThread.join(); checkpointThread.propagateExceptions(); NNStorage storage=cluster.getNameNode().getFSImage().getStorage(); assertEquals(4,storage.getMostRecentCheckpointTxId()); assertNNHasCheckpoints(cluster,ImmutableList.of(2,4)); secondary2.doCheckpoint(); assertEquals(6,storage.getMostRecentCheckpointTxId()); assertParallelFilesInvariant(cluster,ImmutableList.of(secondary1,secondary2)); assertNNHasCheckpoints(cluster,ImmutableList.of(4,6)); } finally { cleanup(secondary1); secondary1=null; cleanup(secondary2); secondary2=null; if (cluster != null) { cluster.shutdown(); cluster=null; } } }

APIUtilityVerifier EqualityVerifier 
/** * Regression test for HDFS-3678 "Edit log files are never being purged from 2NN" */ @Test public void testSecondaryPurgesEditLogs() throws IOException { MiniDFSCluster cluster=null; SecondaryNameNode secondary=null; Configuration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,0); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build(); FileSystem fs=cluster.getFileSystem(); fs.mkdirs(new Path("/foo")); secondary=startSecondaryNameNode(conf); for (int i=0; i < 5; i++) { secondary.doCheckpoint(); } List checkpointDirs=getCheckpointCurrentDirs(secondary); for ( File checkpointDir : checkpointDirs) { List editsFiles=FileJournalManager.matchEditLogs(checkpointDir); assertEquals("Edit log files were not purged from 2NN",1,editsFiles.size()); } } finally { cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Regression test for HDFS-3849. This makes sure that when we re-load the * FSImage in the 2NN, we clear the existing leases. */ @Test public void testSecondaryNameNodeWithSavedLeases() throws IOException { MiniDFSCluster cluster=null; SecondaryNameNode secondary=null; FSDataOutputStream fos=null; Configuration conf=new HdfsConfiguration(); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build(); FileSystem fs=cluster.getFileSystem(); fos=fs.create(new Path("tmpfile")); fos.write(new byte[]{0,1,2,3}); fos.hflush(); assertEquals(1,cluster.getNamesystem().getLeaseManager().countLease()); secondary=startSecondaryNameNode(conf); assertEquals(0,secondary.getFSNamesystem().getLeaseManager().countLease()); secondary.doCheckpoint(); assertEquals(1,secondary.getFSNamesystem().getLeaseManager().countLease()); fos.close(); fos=null; cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false); cluster.getNameNodeRpc().saveNamespace(); cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false); secondary.doCheckpoint(); assertEquals(0,secondary.getFSNamesystem().getLeaseManager().countLease()); } finally { if (fos != null) { fos.close(); } cleanup(secondary); secondary=null; cleanup(cluster); cluster=null; } }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the 2NN writes a legacy OIV image on each checkpoint into the
 * configured directory, and that only DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY
 * (here 2) of those images are retained.
 */
@Test
public void testLegacyOivImage() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File tmpDir = Files.createTempDir();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY, tmpDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, "2");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    secondary = startSecondaryNameNode(conf);
    // First checkpoint writes exactly one legacy OIV image.
    secondary.doCheckpoint();
    String files1[] = tmpDir.list();
    assertEquals("Only one file is expected", 1, files1.length);
    // Two more checkpoints: retention of 2 means the first image is purged.
    secondary.doCheckpoint();
    secondary.doCheckpoint();
    String files2[] = tmpDir.list();
    assertEquals("Two files are expected", 2, files2.length);
    for (String fName : files2) {
      assertFalse(fName.equals(files1[0]));
    }
  } finally {
    cleanup(secondary);
    cleanup(cluster);
    // FIX: File.delete() silently fails on a non-empty directory, so the old
    // code always leaked the temp dir. Remove the image files first.
    File[] leftovers = tmpDir.listFiles();
    if (leftovers != null) {
      for (File f : leftovers) {
        f.delete();
      }
    }
    tmpDir.delete();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestClusterId

APIUtilityVerifier BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format -force -clusterid options when the
 * name directory already exists. The format should succeed and the namenode
 * should come up with exactly the requested cluster id.
 * @throws IOException
 */
@Test
public void testFormatWithForceAndClusterId() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  String myId = "testFormatWithForceAndClusterId";
  String[] formatArgs = {"-format", "-force", "-clusterid", myId};
  // createNameNode() terminates via System.exit(); capture the ExitException
  // instead of letting it propagate.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have succeeded", 0, exitEx.status);
  String cId = getClusterId(config);
  assertEquals("ClusterIds do not match", myId, cId);
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format -force options when the name
 * directory already exists. The format should succeed and a fresh,
 * non-empty cluster id should be generated.
 * @throws IOException
 */
@Test
public void testFormatWithForce() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  String[] formatArgs = {"-format", "-force"};
  // Format always exits the JVM; trap the ExitException to inspect the status.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have succeeded", 0, exitEx.status);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.isEmpty()));
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format -nonInteractive -force options when
 * the name directory already exists. -force wins, so the format should
 * succeed without prompting.
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractiveAndForce() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  String[] formatArgs = {"-format", "-nonInteractive", "-force"};
  // createNameNode() exits the JVM; capture the ExitException for inspection.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have succeeded", 0, exitEx.status);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.isEmpty()));
}

BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format option when a non-empty name
 * directory exists. Enter "N" when prompted; the format must be aborted
 * (exit status 1) and no VERSION file may be written.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFormatWithoutForceEnterNo() throws IOException, InterruptedException {
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Simulate the operator typing "N" at the re-format confirmation prompt.
  InputStream origIn = System.in;
  ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
  System.setIn(bins);
  String[] argv = {"-format"};
  try {
    NameNode.createNameNode(argv, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should not have succeeded", 1, e.status);
  } finally {
    // FIX: restore stdin in a finally block — previously an unexpected
    // exception would leave System.in pointing at the byte stream and
    // poison later tests in the JVM.
    System.setIn(origIn);
  }
  File version = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", version.exists());
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format option when an empty name directory
 * exists. The format should succeed without any prompt and produce a fresh,
 * non-empty cluster id.
 * @throws IOException
 */
@Test
public void testFormatWithEmptyDir() throws IOException {
  if (!hdfsDir.mkdirs()) {
    fail("Failed to create dir " + hdfsDir.getPath());
  }
  String[] formatArgs = {"-format"};
  // Format terminates via System.exit(); capture the ExitException instead.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have succeeded", 0, exitEx.status);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.isEmpty()));
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format -nonInteractive options when the
 * name directory does not exist yet. There is nothing to confirm, so the
 * format should succeed.
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractiveNameDirDoesNotExit() throws IOException {
  String[] formatArgs = {"-format", "-nonInteractive"};
  // createNameNode() exits the JVM; trap the ExitException to check the status.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have succeeded", 0, exitEx.status);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.isEmpty()));
}

APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format option when a non-empty name
 * directory exists. Enter "Y" when prompted; the format should succeed and
 * a fresh, non-empty cluster id should be generated.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFormatWithoutForceEnterYes() throws IOException, InterruptedException {
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  // Simulate the operator typing "Y" at the re-format confirmation prompt.
  InputStream origIn = System.in;
  ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
  System.setIn(bins);
  String[] argv = {"-format"};
  try {
    NameNode.createNameNode(argv, config);
    fail("createNameNode() did not call System.exit()");
  } catch (ExitException e) {
    assertEquals("Format should have succeeded", 0, e.status);
  } finally {
    // FIX: restore stdin in a finally block — previously an unexpected
    // exception would leave System.in redirected and break later tests.
    System.setIn(origIn);
  }
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the plain -format option. The format should
 * succeed and produce a fresh, non-empty cluster id.
 * @throws IOException
 */
@Test
public void testFormat() throws IOException {
  String[] formatArgs = {"-format"};
  // Format terminates via System.exit(); capture the ExitException instead.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have succeeded", 0, exitEx.status);
  String cid = getClusterId(config);
  assertTrue("Didn't get new ClusterId", (cid != null && !cid.isEmpty()));
}

BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test namenode format with the -format -nonInteractive options when a
 * non-empty name directory exists. With no way to prompt, the format must
 * abort (exit status 1) and leave no VERSION file behind.
 * @throws IOException
 */
@Test
public void testFormatWithNonInteractive() throws IOException {
  File data = new File(hdfsDir, "file");
  if (!data.mkdirs()) {
    fail("Failed to create dir " + data.getPath());
  }
  String[] formatArgs = {"-format", "-nonInteractive"};
  // createNameNode() exits the JVM; trap the ExitException for inspection.
  ExitException exitEx = null;
  try {
    NameNode.createNameNode(formatArgs, config);
  } catch (ExitException e) {
    exitEx = e;
  }
  if (exitEx == null) {
    fail("createNameNode() did not call System.exit()");
  }
  assertEquals("Format should have been aborted with exit code 1", 1, exitEx.status);
  File version = new File(hdfsDir, "current/VERSION");
  assertFalse("Check version should not exist", version.exists());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestDeadDatanode

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster
 * - Shut down the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to the namenode and make sure they are rejected
 *   appropriately (IOException for block reports, a re-register command for
 *   heartbeats).
 */
@Test
public void testDeadDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Short heartbeat/recheck intervals so the node is declared dead quickly.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg =
      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
  waitForDatanodeState(reg.getDatanodeUuid(), true, 20000);
  // Kill the datanode and wait until the namenode marks it dead.
  dn.shutdown();
  waitForDatanodeState(reg.getDatanodeUuid(), false, 20000);
  DatanodeProtocol dnp = cluster.getNameNodeRpc();
  ReceivedDeletedBlockInfo[] blocks = {
      new ReceivedDeletedBlockInfo(new Block(0),
          ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null)};
  StorageReceivedDeletedBlocks[] storageBlocks = {
      new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks)};
  // blockReceivedAndDeleted from a dead node must be rejected.
  try {
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // expected: dead datanode requests are refused
  }
  // A full block report from a dead node must also be rejected.
  StorageBlockReport[] report = {
      new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()),
          new long[]{0L, 0L, 0L})};
  try {
    dnp.blockReport(reg, poolId, report);
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // expected: dead datanode requests are refused
  }
  // A heartbeat from a dead node gets a single RegisterCommand back.
  StorageReport[] rep = {
      new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0)};
  DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0).getCommands();
  assertEquals(1, cmd.length);
  // FIX: expected value goes first in assertEquals (was swapped).
  assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestDecommissioningStatus

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests decommissioning status in DFS. Decommissions the cluster's datanodes
 * one at a time and verifies the reported per-node decommissioning counters
 * and the dfsadmin report after each step.
 */
@Test
public void testDecommissionStatus() throws IOException, InterruptedException {
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", 2, info.length);
  DistributedFileSystem fileSys = cluster.getFileSystem();
  DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
  short replicas = 2;
  // One fully-written file and one open (incomplete) file give the
  // decommissioning nodes both finalized and under-construction blocks.
  Path file1 = new Path("decommission.dat");
  DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replicas, seed);
  Path file2 = new Path("decommission1.dat");
  FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
  Thread.sleep(5000);
  FSNamesystem fsn = cluster.getNamesystem();
  final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
  for (int iteration = 0; iteration < numDatanodes; iteration++) {
    String downnode = decommissionNode(fsn, client, localFileSys, iteration);
    dm.refreshNodes(conf);
    decommissionedNodes.add(downnode);
    // Give the decommission monitor time to pick up the change.
    Thread.sleep(5000);
    final List decommissioningNodes = dm.getDecommissioningNodes();
    if (iteration == 0) {
      // FIX: assertEquals takes (expected, actual); args were swapped.
      assertEquals(1, decommissioningNodes.size());
      DatanodeDescriptor decommNode = decommissioningNodes.get(0);
      checkDecommissionStatus(decommNode, 4, 0, 2);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1), fileSys, admin);
    } else {
      // FIX: assertEquals takes (expected, actual); args were swapped.
      assertEquals(2, decommissioningNodes.size());
      DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
      DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
      checkDecommissionStatus(decommNode1, 4, 4, 2);
      checkDecommissionStatus(decommNode2, 4, 4, 2);
      checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2), fileSys, admin);
    }
  }
  // Recommission everything and clean up the test files.
  writeConfigFile(localFileSys, excludeFile, null);
  dm.refreshNodes(conf);
  st1.close();
  cleanupFile(fileSys, file1);
  cleanupFile(fileSys, file2);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestDeduplicationMap

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that DeduplicationMap hands out sequential ids for new strings
 * and returns the same id when an already-seen string is looked up again.
 */
@Test
public void testDeduplicationMap() {
  DeduplicationMap m = DeduplicationMap.newMap();
  // First pass assigns ids 1..3; second pass must return the same ids.
  for (int pass = 0; pass < 2; pass++) {
    Assert.assertEquals(1, m.getId("1"));
    Assert.assertEquals(2, m.getId("2"));
    Assert.assertEquals(3, m.getId("3"));
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestDiskspaceQuotaUpdate

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the quota (namespace and diskspace usage) is correctly updated
 * after appends: a half-block append, a full-block append, and an append
 * spanning multiple blocks plus a fraction.
 */
@Test(timeout = 60000)
public void testUpdateQuotaForAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(foo, "bar");
  long currentFileLen = BLOCKSIZE;
  DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  // Append a half block.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
  currentFileLen += (BLOCKSIZE / 2);
  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  assertTrue(fooNode.isQuotaSet());
  Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns = quota.get(Quota.NAMESPACE);
  long ds = quota.get(Quota.DISKSPACE);
  // Namespace = the dir plus the file; diskspace = length * replication.
  assertEquals(2, ns);
  assertEquals(currentFileLen * REPLICATION, ds);
  ContentSummary c = dfs.getContentSummary(foo);
  // FIX: assertEquals takes (expected, actual); ds is the expected value.
  assertEquals(ds, c.getSpaceConsumed());
  // Append a whole block.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
  currentFileLen += BLOCKSIZE;
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.get(Quota.NAMESPACE);
  ds = quota.get(Quota.DISKSPACE);
  assertEquals(2, ns);
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  // FIX: swapped expected/actual order corrected.
  assertEquals(ds, c.getSpaceConsumed());
  // Append several blocks plus a fraction in one go.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
  currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.get(Quota.NAMESPACE);
  ds = quota.get(Quota.DISKSPACE);
  assertEquals(2, ns);
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  // FIX: swapped expected/actual order corrected.
  assertEquals(ds, c.getSpaceConsumed());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test if the quota can be correctly updated when the file length is updated
 * through fsync (hsync with UPDATE_LENGTH) while the file is still open,
 * and again after the file is closed and appended to.
 */
@Test(timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  // Persist the new length while the block is still under construction.
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  Quota.Counts quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns = quota.get(Quota.NAMESPACE);
  long ds = quota.get(Quota.DISKSPACE);
  assertEquals(2, ns);
  // While under construction, a full block is charged for the open block.
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds);
  out.write(new byte[BLOCKSIZE / 4]);
  out.close();
  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.get(Quota.NAMESPACE);
  ds = quota.get(Quota.DISKSPACE);
  assertEquals(2, ns);
  // After close, only the actual written length (1.5 blocks) is charged.
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
  // A further append updates the consumed diskspace accordingly.
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.get(Quota.NAMESPACE);
  ds = quota.get(Quota.DISKSPACE);
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the quota is correctly updated when a multi-block file is created
 * under a directory with quotas enabled.
 */
@Test(timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile = new Path(foo, "created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  // 2.5 blocks, written in small (BLOCKSIZE/16) increments.
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, REPLICATION, seed);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  Quota.Counts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature().getSpaceConsumed();
  // Namespace = directory + file; diskspace = file length * replication.
  assertEquals(2, cnt.get(Quota.NAMESPACE));
  assertEquals(fileLen * REPLICATION, cnt.get(Quota.DISKSPACE));
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLog

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test loading an editlog which has had both its storage dirs fail on
 * alternating rolls. Two edit log directories are created; the first one
 * fails on odd rolls, the second on even. Test that we are able to load
 * the entire editlog regardless, failing over between the two copies.
 */
@Test
public void testAlternatingJournalFailure() throws IOException {
  File f1 = new File(TEST_DIR + "/alternatingjournaltest0");
  File f2 = new File(TEST_DIR + "/alternatingjournaltest1");
  List editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  // 10 rolls; each AbortSpec(roll, dir) aborts one dir, alternating 0/1.
  NNStorage storage = setupEdits(editUris, 10,
      new AbortSpec(1, 0), new AbortSpec(2, 1), new AbortSpec(3, 0),
      new AbortSpec(4, 1), new AbortSpec(5, 0), new AbortSpec(6, 1),
      new AbortSpec(7, 0), new AbortSpec(8, 1), new AbortSpec(9, 0),
      new AbortSpec(10, 1));
  long totaltxnread = 0;
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Iterable editStreams = editlog.selectInputStreams(startTxId, TXNS_PER_ROLL * 11);
  for (EditLogInputStream edits : editStreams) {
    // Validate each selected stream and confirm the streams are contiguous.
    FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits);
    long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
    LOG.info("Loading edits " + edits + " read " + read);
    assertEquals(startTxId, edits.getFirstTxId());
    startTxId += read;
    totaltxnread += read;
  }
  editlog.close();
  storage.close();
  // Every transaction from all 11 segments must have been readable.
  assertEquals(TXNS_PER_ROLL * 11, totaltxnread);
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test loading an editlog with gaps. A single editlog directory is set up
 * and one of the finalized edit log files is deleted. Selecting the input
 * streams should then fail, as it cannot supply enough streams to load up
 * to 4*TXNS_PER_ROLL (there are 4*TXNS_PER_ROLL transactions because we
 * rolled 3 times).
 */
@Test
public void testLoadingWithGaps() throws IOException {
  File f1 = new File(TEST_DIR + "/gaptest0");
  List editUris = ImmutableList.of(f1.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  // The second segment [TXNS_PER_ROLL+1, 2*TXNS_PER_ROLL] will be removed.
  final long startGapTxId = 1 * TXNS_PER_ROLL + 1;
  final long endGapTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1, files.length);
  // Create the gap by deleting the matched segment file.
  assertTrue(files[0].delete());
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  try {
    editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    fail("Should have thrown exception");
  } catch (IOException ioe) {
    // The error message must point at the first missing txid (11).
    GenericTestUtils.assertExceptionContains(
        "Gap in transactions. Expected to be able to read up until "
            + "at least txid 40 but unable to find any edit logs containing "
            + "txid 11", ioe);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests that edits logged from two threads without syncing do not advance
 * the synced txid, and that a single logSync from one thread batches and
 * syncs the other thread's pending transaction as well.
 */
@Test
public void testSyncBatching() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  // Two single-thread executors stand in for two independent client threads.
  ExecutorService threadA = Executors.newSingleThreadExecutor();
  ExecutorService threadB = Executors.newSingleThreadExecutor();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",
        1, editLog.getSyncTxId());
    // FIX: assertion message grammar ("should do not affect" -> "should not affect").
    doLogEdit(threadA, editLog, "thread-a 1");
    assertEquals("logging edit without syncing should not affect txid",
        1, editLog.getSyncTxId());
    doLogEdit(threadB, editLog, "thread-b 1");
    assertEquals("logging edit without syncing should not affect txid",
        1, editLog.getSyncTxId());
    // Thread B's sync batches both pending edits into one sync.
    doCallLogSync(threadB, editLog);
    assertEquals("logSync from second thread should bump txid up to 3",
        3, editLog.getSyncTxId());
    doCallLogSync(threadA, editLog);
    assertEquals("logSync from first thread shouldn't change txid",
        3, editLog.getSyncTxId());
    // Exactly one transaction was piggy-backed on another thread's sync.
    assertCounter("TransactionsBatchedInSync", 1L, getMetrics("NameNodeActivity"));
  } finally {
    threadA.shutdown();
    threadB.shutdown();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test edit log failover. If a single edit log file is missing from one
 * directory, the copy in the other directory should be used instead and
 * all edits should still be readable.
 */
@Test
public void testEditLogFailOverFromMissing() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  // Delete the second segment from f1 only; f2 still holds a good copy.
  final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId))) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection streams = null;
  try {
    streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // FIX: guard against streams being null — if selectInputStreams threw,
    // the old finally block raised an NPE that masked the real failure.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test edit log failover from a corrupt edit log: corrupt one directory's
 * copy of a segment (flip a bit in its trailing checksum word) and verify
 * that reading falls back to the intact copy in the other directory.
 */
@Test
public void testEditLogFailOverFromCorrupt() throws IOException {
  File f1 = new File(TEST_DIR + "/failover0");
  File f2 = new File(TEST_DIR + "/failover1");
  List editUris = ImmutableList.of(f1.toURI(), f2.toURI());
  NNStorage storage = setupEdits(editUris, 3);
  final long startErrorTxId = 1 * TXNS_PER_ROLL + 1;
  final long endErrorTxId = 2 * TXNS_PER_ROLL;
  File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId))) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1, files.length);
  long fileLen = files[0].length();
  LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
  // Increment the last 4 bytes so the segment's checksum no longer matches.
  RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
  rwf.seek(fileLen - 4);
  int b = rwf.readInt();
  rwf.seek(fileLen - 4);
  rwf.writeInt(b + 1);
  rwf.close();
  FSEditLog editlog = getFSEditLog(storage);
  editlog.initJournalsForWrite();
  long startTxId = 1;
  Collection streams = null;
  try {
    streams = editlog.selectInputStreams(startTxId, 4 * TXNS_PER_ROLL);
    readAllEdits(streams, startTxId);
  } catch (IOException e) {
    LOG.error("edit log failover didn't work", e);
    fail("Edit log failover didn't work");
  } finally {
    // FIX: guard against streams being null — if selectInputStreams threw,
    // the old finally block raised an NPE that masked the real failure.
    if (streams != null) {
      IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
    }
  }
}

APIUtilityVerifier EqualityVerifier 
/**
 * Test case for an empty edit log from a prior version of Hadoop:
 * loading it should report zero edits.
 */
@Test
public void testPreTxIdEditLogNoEdits() throws Exception {
  // A mocked namesystem with a mocked FSDirectory is all loading needs here.
  FSNamesystem mockNamesystem = Mockito.mock(FSNamesystem.class);
  mockNamesystem.dir = Mockito.mock(FSDirectory.class);
  // Header bytes of an empty pre-transaction-id edit log.
  byte[] emptyLogBytes = StringUtils.hexStringToByte("ffffffed");
  long loadedEdits = testLoad(emptyLogBytes, mockNamesystem);
  assertEquals(0, loadedEdits);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test case for loading a very simple edit log from a format prior to the
 * inclusion of edit transaction IDs in the log. Replays a captured
 * Hadoop-0.20-era log and checks the resulting file metadata.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    // HADOOP20_SOME_EDITS is a canned pre-txid log with three operations.
    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // The replayed edits should have created /myfile with these attributes.
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Test what happens with the following sequence:
 *   Thread A writes edit
 *   Thread B calls logSyncAll, then calls close() on the stream
 *   Thread A calls logSync
 * This sequence is legal and can occur if enterSafeMode() is closely
 * followed by saveNamespace; the late logSync must be a harmless no-op.
 */
@Test
public void testBatchedSyncWithClosedLogs() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  // Two single-thread executors stand in for the two interleaving threads.
  ExecutorService threadA = Executors.newSingleThreadExecutor();
  ExecutorService threadB = Executors.newSingleThreadExecutor();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    doLogEdit(threadA, editLog, "thread-a 1");
    // FIX: assertion message grammar ("should do not affect" -> "should not affect").
    assertEquals("logging edit without syncing should not affect txid",
        1, editLog.getSyncTxId());
    doCallLogSyncAll(threadB, editLog);
    assertEquals("logSyncAll should sync thread A's transaction",
        2, editLog.getSyncTxId());
    editLog.close();
    // A logSync after close must not throw.
    doCallLogSync(threadA, editLog);
  } finally {
    threadA.shutdown();
    threadB.shutdown();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests the getEditLogManifest function using mock storage for a number of
 * different situations. Each mockStorageWithEdits string describes the
 * segments ("[start,end]", "[start,]" = in-progress) present in two storage
 * dirs; the manifest must merge them, skipping in-progress segments.
 */
@Test
public void testEditLogManifestMocks() throws IOException {
  NNStorage storage;
  FSEditLog log;
  // Standard case: two identical dirs; in-progress tail [201,] excluded.
  storage = mockStorageWithEdits("[1,100]|[101,200]|[201,]", "[1,100]|[101,200]|[201,]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200]]", log.getEditLogManifest(1).toString());
  assertEquals("[[101,200]]", log.getEditLogManifest(101).toString());
  // Dirs hold different segments; manifest is the union of finalized ones.
  storage = mockStorageWithEdits("[1,100]|[101,200]", "[1,100]|[201,300]|[301,400]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200], [201,300], [301,400]]",
      log.getEditLogManifest(1).toString());
  // A gap (101-300 missing): manifest starts after the gap.
  storage = mockStorageWithEdits("[1,100]|[301,400]", "[301,400]|[401,500]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[301,400], [401,500]]", log.getEditLogManifest(1).toString());
  // Overlapping/differently-rolled segments: longest coverage wins.
  storage = mockStorageWithEdits("[1,100]|[101,150]", "[1,50]|[101,200]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200]]", log.getEditLogManifest(1).toString());
  assertEquals("[[101,200]]", log.getEditLogManifest(101).toString());
  // In-progress in one dir, finalized in the other: finalized copy is used.
  storage = mockStorageWithEdits("[1,100]|[101,]", "[1,100]|[101,200]");
  log = getFSEditLog(storage);
  log.initJournalsForWrite();
  assertEquals("[[1,100], [101,200]]", log.getEditLogManifest(1).toString());
  assertEquals("[[101,200]]", log.getEditLogManifest(101).toString());
}

APIUtilityVerifier UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that a corrupted edit log checksum is detected on restart: corrupt
 * the trailing checksum word of every finalized edits file, then verify the
 * cluster refuses to start with a ChecksumException as the cause.
 */
@Test
public void testEditChecksum() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  FSImage fsimage = namesystem.getFSImage();
  final FSEditLog editLog = fsimage.getEditLog();
  fileSys.mkdirs(new Path("/tmp"));
  // Snapshot the edits storage dirs before shutting the cluster down.
  Iterator iter = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS);
  LinkedList sds = new LinkedList();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();
  cluster = null;
  // Flip a bit in each finalized edits file's trailing checksum word.
  for (StorageDirectory sd : sds) {
    File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
    assertTrue(editFile.exists());
    long fileLen = editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen - 4);
    int b = rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
    rwf.close();
  }
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .format(false).build();
    fail("should not be able to start");
  } catch (IOException e) {
    assertNotNull("Cause of exception should be ChecksumException", e.getCause());
    assertEquals("Cause of exception should be ChecksumException",
        ChecksumException.class, e.getCause().getClass());
  } finally {
    // FIX: if the restart unexpectedly succeeds, don't leak the cluster.
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLogFileInputStream

APIUtilityVerifier EqualityVerifier ConditionMatcher HybridVerifier 
/**
 * Tests reading an edit log over HTTP from a mocked URLConnection: the
 * stream must parse the fake log data, contain the expected op codes,
 * and report a length equal to the advertised Content-Length header.
 */
@Test public void testReadURL() throws Exception {
  HttpURLConnection conn=mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");
  URLConnectionFactory factory=mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.any(),anyBoolean());
  URL url=new URL("http://localhost/fakeLog");
  EditLogInputStream elis=EditLogFileInputStream.fromUrl(factory,url,HdfsConstants.INVALID_TXID,HdfsConstants.INVALID_TXID,false);
  // NOTE(review): the extracted source read "EnumMap> counts" (generic
  // parameters lost); restored to the op-code -> counter map implied by
  // the ".held" accesses below.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts=FSImageTestUtil.countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held,is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held,is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held,is(1));
  assertEquals(FAKE_LOG_DATA.length,elis.length());
  elis.close();
}

Class: org.apache.hadoop.hdfs.server.namenode.TestEditLogRace

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests rolling edit logs while transactions are ongoing: background
 * worker threads keep applying edits while the main thread repeatedly
 * rolls the log and verifies each finalized segment.
 */
@Test public void testEditLogRolling() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  // First error thrown by any transaction worker; null means no failure.
  // (Restored the <Throwable> type parameter lost in extraction.)
  AtomicReference<Throwable> caughtErr=new AtomicReference<Throwable>();
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    final FSNamesystem namesystem=cluster.getNamesystem();
    FSImage fsimage=namesystem.getFSImage();
    StorageDirectory sd=fsimage.getStorage().getStorageDir(0);
    startTransactionWorkers(namesystem,caughtErr);
    long previousLogTxId=1;
    for (int i=0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException e) {
      }
      LOG.info("Starting roll " + i + ".");
      CheckpointSignature sig=namesystem.rollEditLog();
      long nextLog=sig.curSegmentTxId;
      String logFileName=NNStorage.getFinalizedEditsFileName(previousLogTxId,nextLog - 1);
      // verifyEditLogs returns the number of transactions in the segment;
      // advancing by it must land exactly on the next segment's start txid.
      previousLogTxId+=verifyEditLogs(namesystem,fsimage,logFileName,previousLogTxId);
      assertEquals(previousLogTxId,nextLog);
      File expectedLog=NNStorage.getInProgressEditsFile(sd,previousLogTxId);
      assertTrue("Expect " + expectedLog + " to exist",expectedLog.exists());
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests saving the fs image while transactions are ongoing: repeatedly
 * enters safe mode, saves the namespace, and verifies both the
 * in-progress and finalized edit segments around each checkpoint.
 */
@Test public void testSaveNamespace() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  FileSystem fileSys=null;
  // First error thrown by any transaction worker; null means no failure.
  // (Restored the <Throwable> type parameter lost in extraction.)
  AtomicReference<Throwable> caughtErr=new AtomicReference<Throwable>();
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys=cluster.getFileSystem();
    final FSNamesystem namesystem=cluster.getNamesystem();
    FSImage fsimage=namesystem.getFSImage();
    FSEditLog editLog=fsimage.getEditLog();
    startTransactionWorkers(namesystem,caughtErr);
    for (int i=0; i < NUM_SAVE_IMAGE && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException e) {
      }
      LOG.info("Save " + i + ": entering safe mode");
      namesystem.enterSafeMode(false);
      // The current segment starts right after the last checkpoint txid.
      long logStartTxId=fsimage.getStorage().getMostRecentCheckpointTxId() + 1;
      verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(logStartTxId),logStartTxId);
      LOG.info("Save " + i + ": saving namespace");
      namesystem.saveNamespace();
      LOG.info("Save " + i + ": leaving safemode");
      long savedImageTxId=fsimage.getStorage().getMostRecentCheckpointTxId();
      verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(logStartTxId,savedImageTxId),logStartTxId);
      // The checkpoint must have caught up to the last written txn minus
      // the BEGIN_LOG_SEGMENT op of the new segment.
      assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),editLog.getLastWrittenTxId() - 1);
      namesystem.leaveSafeMode();
      LOG.info("Save " + i + ": complete");
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the
 * unsynchronized middle section.
 * This replicates the following manual test proposed by Konstantin:
 * I start the name-node in debugger.
 * I do -mkdir and stop the debugger in logSync() just before it does flush.
 * Then I enter safe mode with another client.
 * I start saveNamespace and stop the debugger in
 * FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 * -> EditLogFileOutputStream.create() ->
 * after truncating the file but before writing LAYOUT_VERSION into it.
 * Then I let logSync() run.
 * Then I terminate the name-node.
 * After that the name-node won't start, since the edits file is broken.
 */
@Test public void testSaveImageWhileSyncInProgress() throws Exception {
  Configuration conf=getConf();
  NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
  try {
    FSImage fsimage=namesystem.getFSImage();
    FSEditLog editLog=fsimage.getEditLog();
    JournalAndStream jas=editLog.getJournals().get(0);
    // Spy on the current output stream so flush() can be intercepted.
    EditLogFileOutputStream spyElos=spy((EditLogFileOutputStream)jas.getCurrentStream());
    jas.setCurrentStreamForTests(spyElos);
    // (Restored the generic type parameters lost in extraction:
    // AtomicReference<Throwable> and Answer<Void> were raw types.)
    final AtomicReference<Throwable> deferredException=new AtomicReference<Throwable>();
    final CountDownLatch waitToEnterFlush=new CountDownLatch(1);
    final Thread doAnEditThread=new Thread(){
      @Override public void run(){
        try {
          LOG.info("Starting mkdirs");
          namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true);
          LOG.info("mkdirs complete");
        } catch (Throwable ioe) {
          LOG.fatal("Got exception",ioe);
          deferredException.set(ioe);
          waitToEnterFlush.countDown();
        }
      }
    };
    // Block the edit thread inside flush() long enough for the main thread
    // to enter safe mode and save the namespace concurrently.
    Answer<Void> blockingFlush=new Answer<Void>(){
      @Override public Void answer(InvocationOnMock invocation) throws Throwable {
        LOG.info("Flush called");
        if (Thread.currentThread() == doAnEditThread) {
          LOG.info("edit thread: Telling main thread we made it to flush section...");
          waitToEnterFlush.countDown();
          LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
          Thread.sleep(BLOCK_TIME * 1000);
          LOG.info("Going through to flush. This will allow the main thread to continue.");
        }
        invocation.callRealMethod();
        LOG.info("Flush complete");
        return null;
      }
    };
    doAnswer(blockingFlush).when(spyElos).flush();
    doAnEditThread.start();
    LOG.info("Main thread: waiting to enter flush...");
    waitToEnterFlush.await();
    assertNull(deferredException.get());
    LOG.info("Main thread: detected that logSync is in unsynchronized section.");
    LOG.info("Trying to enter safe mode.");
    LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
    long st=Time.now();
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    long et=Time.now();
    LOG.info("Entered safe mode");
    // Entering safe mode must have waited for the in-flight sync to finish.
    assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
    namesystem.saveNamespace();
    LOG.info("Joining on edit thread...");
    doAnEditThread.join();
    assertNull(deferredException.get());
    // Txns 1-3 end up in the finalized segment; txn 4 opens a new
    // in-progress segment.
    assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1));
    assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4));
  } finally {
    LOG.info("Closing namesystem");
    if (namesystem != null) namesystem.close();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Most of the FSNamesystem methods have a synchronized section where they * update the name system itself and write to the edit log, and then * unsynchronized, they call logSync. This test verifies that, if an * operation has written to the edit log but not yet synced it, * we wait for that sync before entering safe mode. */ @Test public void testSaveRightBeforeSync() throws Exception { Configuration conf=getConf(); NameNode.initMetrics(conf,NamenodeRole.NAMENODE); DFSTestUtil.formatNameNode(conf); final FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf); try { FSImage fsimage=namesystem.getFSImage(); FSEditLog editLog=spy(fsimage.getEditLog()); fsimage.editLog=editLog; final AtomicReference deferredException=new AtomicReference(); final CountDownLatch waitToEnterSync=new CountDownLatch(1); final Thread doAnEditThread=new Thread(){ @Override public void run(){ try { LOG.info("Starting mkdirs"); namesystem.mkdirs("/test",new PermissionStatus("test","test",new FsPermission((short)00755)),true); LOG.info("mkdirs complete"); } catch ( Throwable ioe) { LOG.fatal("Got exception",ioe); deferredException.set(ioe); waitToEnterSync.countDown(); } } } ; Answer blockingSync=new Answer(){ @Override public Void answer( InvocationOnMock invocation) throws Throwable { LOG.info("logSync called"); if (Thread.currentThread() == doAnEditThread) { LOG.info("edit thread: Telling main thread we made it just before logSync..."); waitToEnterSync.countDown(); LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs"); Thread.sleep(BLOCK_TIME * 1000); LOG.info("Going through to logSync. 
This will allow the main thread to continue."); } invocation.callRealMethod(); LOG.info("logSync complete"); return null; } } ; doAnswer(blockingSync).when(editLog).logSync(); doAnEditThread.start(); LOG.info("Main thread: waiting to just before logSync..."); waitToEnterSync.await(); assertNull(deferredException.get()); LOG.info("Main thread: detected that logSync about to be called."); LOG.info("Trying to enter safe mode."); LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits"); long st=Time.now(); namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); long et=Time.now(); LOG.info("Entered safe mode"); assertTrue(et - st > (BLOCK_TIME - 1) * 1000); namesystem.saveNamespace(); LOG.info("Joining on edit thread..."); doAnEditThread.join(); assertNull(deferredException.get()); assertEquals(3,verifyEditLogs(namesystem,fsimage,NNStorage.getFinalizedEditsFileName(1,3),1)); assertEquals(1,verifyEditLogs(namesystem,fsimage,NNStorage.getInProgressEditsFileName(4),4)); } finally { LOG.info("Closing namesystem"); if (namesystem != null) namesystem.close(); } }

Class: org.apache.hadoop.hdfs.server.namenode.TestEditsDoubleBuffer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises EditsDoubleBuffer's two-buffer protocol: writes land in the
 * current buffer, setReadyToFlush() swaps buffers, and flushTo() drains
 * the ready side while new writes may accumulate.
 */
@Test public void testDoubleBuffer() throws IOException {
  EditsDoubleBuffer buf=new EditsDoubleBuffer(1024);
  assertTrue(buf.isFlushed());
  byte[] data=new byte[100];
  // Writing into the current buffer buffers bytes but does not change the
  // flushed state (nothing is on the ready-to-flush side yet).
  buf.writeRaw(data,0,data.length);
  assertEquals("Should count new data correctly",data.length,buf.countBufferedBytes());
  assertTrue("Writing to current buffer should not affect flush state",buf.isFlushed());
  // Swap: the pending bytes move to the ready-to-flush side.
  buf.setReadyToFlush();
  assertEquals("Swapping buffers should still count buffered bytes",data.length,buf.countBufferedBytes());
  assertFalse(buf.isFlushed());
  DataOutputBuffer outBuf=new DataOutputBuffer();
  buf.flushTo(outBuf);
  assertEquals(data.length,outBuf.getLength());
  assertTrue(buf.isFlushed());
  assertEquals(0,buf.countBufferedBytes());
  // Second round: flushTo appends to outBuf, so its length doubles.
  buf.writeRaw(data,0,data.length);
  assertEquals("Should count new data correctly",data.length,buf.countBufferedBytes());
  buf.setReadyToFlush();
  buf.flushTo(outBuf);
  assertEquals(data.length * 2,outBuf.getLength());
  assertEquals(0,buf.countBufferedBytes());
  outBuf.close();
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFSDirectory

IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests error cases for batched XAttr updates on an inode: duplicate
 * names in one request, CREATE on an already-set XAttr, REPLACE on a
 * missing XAttr, then successful CREATE, REPLACE, and CREATE|REPLACE
 * batches. (Restored List<XAttr> type parameters lost in extraction.)
 */
@Test(timeout=300000) public void testXAttrMultiAddRemoveErrors() throws Exception {
  List<XAttr> existingXAttrs=Lists.newArrayList();
  List<XAttr> toAdd=Lists.newArrayList();
  toAdd.add(generatedXAttrs.get(0));
  toAdd.add(generatedXAttrs.get(1));
  toAdd.add(generatedXAttrs.get(2));
  toAdd.add(generatedXAttrs.get(0));
  // Duplicate entry (xattr 0 twice) must be rejected.
  try {
    fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
    fail("Specified the same xattr to be set twice");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot specify the same " + "XAttr to be set",e);
  }
  // Drop one copy of xattr 0 and pretend it already exists on the inode.
  toAdd.remove(generatedXAttrs.get(0));
  existingXAttrs.add(generatedXAttrs.get(0));
  // CREATE on an existing xattr must fail.
  try {
    fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
    fail("Set XAttr that is already set without REPLACE flag");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("already exists",e);
  }
  // REPLACE on xattrs that don't exist yet must fail.
  try {
    fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
    fail("Set XAttr that does not exist without the CREATE flag");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("does not exist",e);
  }
  // Successful CREATE of the remaining two xattrs.
  toAdd.remove(generatedXAttrs.get(0));
  List<XAttr> newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
  assertEquals("Unexpected toAdd size",2,toAdd.size());
  for (XAttr x : toAdd) {
    assertTrue("Did not find added XAttr " + x,newXAttrs.contains(x));
  }
  existingXAttrs=newXAttrs;
  // REPLACE all three existing xattrs with new values.
  toAdd=Lists.newArrayList();
  for (int i=0; i < 3; i++) {
    XAttr xAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a" + i).setValue(new byte[]{(byte)(i * 2)}).build();
    toAdd.add(xAttr);
  }
  newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.REPLACE));
  assertEquals("Unexpected number of new XAttrs",3,newXAttrs.size());
  for (int i=0; i < 3; i++) {
    assertArrayEquals("Unexpected XAttr value",new byte[]{(byte)(i * 2)},newXAttrs.get(i).getValue());
  }
  existingXAttrs=newXAttrs;
  // CREATE|REPLACE succeeds regardless of prior existence.
  toAdd=Lists.newArrayList();
  for (int i=0; i < 4; i++) {
    toAdd.add(generatedXAttrs.get(i));
  }
  newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
  verifyXAttrsPresent(newXAttrs,4);
}

APIUtilityVerifier IterativeVerifier EqualityVerifier 
/**
 * Test setting and removing multiple xattrs via single operations:
 * adds the generated xattrs in random-sized batches, then removes them
 * in random-sized batches, verifying the inode's xattr set after each
 * step. (Restored List<XAttr> type parameters lost in extraction.)
 */
@Test(timeout=300000) public void testXAttrMultiSetRemove() throws Exception {
  List<XAttr> existingXAttrs=Lists.newArrayListWithCapacity(0);
  // Fixed seed keeps the batch sizes reproducible.
  final Random rand=new Random(0xFEEDA);
  int numExpectedXAttrs=0;
  while (numExpectedXAttrs < numGeneratedXAttrs) {
    LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
    final int numToAdd=rand.nextInt(5) + 1;
    List<XAttr> toAdd=Lists.newArrayListWithCapacity(numToAdd);
    for (int i=0; i < numToAdd; i++) {
      if (numExpectedXAttrs >= numGeneratedXAttrs) {
        break;
      }
      toAdd.add(generatedXAttrs.get(numExpectedXAttrs));
      numExpectedXAttrs++;
    }
    LOG.info("Attempting to add " + toAdd.size() + " XAttrs");
    for (int i=0; i < toAdd.size(); i++) {
      LOG.info("Will add XAttr " + toAdd.get(i));
    }
    List<XAttr> newXAttrs=fsdir.setINodeXAttrs(existingXAttrs,toAdd,EnumSet.of(XAttrSetFlag.CREATE));
    verifyXAttrsPresent(newXAttrs,numExpectedXAttrs);
    existingXAttrs=newXAttrs;
  }
  // Remove in random batches, newest-first.
  while (numExpectedXAttrs > 0) {
    LOG.info("Currently have " + numExpectedXAttrs + " xattrs");
    final int numToRemove=rand.nextInt(5) + 1;
    List<XAttr> toRemove=Lists.newArrayListWithCapacity(numToRemove);
    for (int i=0; i < numToRemove; i++) {
      if (numExpectedXAttrs == 0) {
        break;
      }
      toRemove.add(generatedXAttrs.get(numExpectedXAttrs - 1));
      numExpectedXAttrs--;
    }
    final int expectedNumToRemove=toRemove.size();
    LOG.info("Attempting to remove " + expectedNumToRemove + " XAttrs");
    List<XAttr> removedXAttrs=Lists.newArrayList();
    List<XAttr> newXAttrs=fsdir.filterINodeXAttrs(existingXAttrs,toRemove,removedXAttrs);
    assertEquals("Unexpected number of removed XAttrs",expectedNumToRemove,removedXAttrs.size());
    verifyXAttrsPresent(newXAttrs,numExpectedXAttrs);
    existingXAttrs=newXAttrs;
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the per-inode XAttr limit: SYSTEM/RAW-namespace xattrs may be
 * added past the limit, but a user-visible (TRUSTED) xattr that would
 * exceed it must be rejected. (Restored List<XAttr> type parameters
 * lost in extraction, and fixed assertEquals argument order.)
 */
@Test public void testINodeXAttrsLimit() throws Exception {
  List<XAttr> existingXAttrs=Lists.newArrayListWithCapacity(2);
  XAttr xAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(new byte[]{0x31,0x32,0x33}).build();
  XAttr xAttr2=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(new byte[]{0x31,0x31,0x31}).build();
  existingXAttrs.add(xAttr1);
  existingXAttrs.add(xAttr2);
  // SYSTEM and RAW xattrs are exempt from the user-visible limit.
  XAttr newSystemXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build();
  XAttr newRawXAttr=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).setName("a3").setValue(new byte[]{0x33,0x33,0x33}).build();
  List<XAttr> newXAttrs=Lists.newArrayListWithCapacity(2);
  newXAttrs.add(newSystemXAttr);
  newXAttrs.add(newRawXAttr);
  List<XAttr> xAttrs=fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
  // JUnit convention: expected value first (was swapped in the original).
  assertEquals(4,xAttrs.size());
  // A TRUSTED xattr counts against the limit and must be rejected.
  XAttr newXAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.TRUSTED).setName("a4").setValue(new byte[]{0x34,0x34,0x34}).build();
  newXAttrs.set(0,newXAttr1);
  try {
    fsdir.setINodeXAttrs(existingXAttrs,newXAttrs,EnumSet.of(XAttrSetFlag.CREATE,XAttrSetFlag.REPLACE));
    fail("Setting user visible xattr on inode should fail if " + "reaching limit.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " + "to inode, would exceed limit",e);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFSEditLogLoader

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies FSEditLogOpCodes.fromByte: every declared op code round-trips
 * through its byte value, and fromByte is consistent for all 256 byte
 * values (including bytes that are not valid op codes).
 */
@Test public void testFSEditLogOpCodes() throws IOException {
  // Round-trip each declared op code through its byte encoding.
  for (FSEditLogOpCodes c : FSEditLogOpCodes.values()) {
    final byte code=c.getOpCode();
    assertEquals("c=" + c + ", code="+ code,c,FSEditLogOpCodes.fromByte(code));
  }
  // The statically-imported fromByte must agree with the qualified call
  // for every possible byte value.
  for (int b=0; b < (1 << Byte.SIZE); b++) {
    final byte code=(byte)b;
    assertEquals("b=" + b + ", code="+ code,fromByte(code),FSEditLogOpCodes.fromByte(code));
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Validating an edit log truncated down to just its header (8 bytes)
 * must report no corrupt header and no end txid.
 * (Restored the SortedMap<Long, Long> type parameters lost in
 * extraction: the map is file-offset -> txid.)
 */
@Test public void testValidateEmptyEditLog() throws IOException {
  File testDir=new File(TEST_DIR,"testValidateEmptyEditLog");
  SortedMap<Long, Long> offsetToTxId=Maps.newTreeMap();
  File logFile=prepareUnfinalizedTestEditLog(testDir,0,offsetToTxId);
  // Keep only the log header; all transaction data is cut off.
  truncateFile(logFile,8);
  EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
  assertTrue(!validation.hasCorruptHeader());
  assertEquals(HdfsConstants.INVALID_TXID,validation.getEndTxId());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Validates edit-log recovery with a corrupted body: for each recorded
 * transaction offset, corrupts or truncates the file at that point and
 * checks that validateEditLog reports the expected last valid txid.
 * (Restored the SortedMap<Long, Long> / Map.Entry<Long, Long> type
 * parameters lost in extraction: the map is file-offset -> txid.)
 */
@Test public void testValidateEditLogWithCorruptBody() throws IOException {
  File testDir=new File(TEST_DIR,"testValidateEditLogWithCorruptBody");
  SortedMap<Long, Long> offsetToTxId=Maps.newTreeMap();
  final int NUM_TXNS=20;
  File logFile=prepareUnfinalizedTestEditLog(testDir,NUM_TXNS,offsetToTxId);
  // Keep a pristine copy so each corruption starts from a clean file.
  File logFileBak=new File(testDir,logFile.getName() + ".bak");
  Files.copy(logFile,logFileBak);
  EditLogValidation validation=EditLogFileInputStream.validateEditLog(logFile);
  assertTrue(!validation.hasCorruptHeader());
  assertEquals(NUM_TXNS + 1,validation.getEndTxId());
  // Pass 1: flip one byte at each transaction's opcode offset.
  for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
    long txOffset=entry.getKey();
    long txId=entry.getValue();
    Files.copy(logFileBak,logFile);
    corruptByteInFile(logFile,txOffset);
    validation=EditLogFileInputStream.validateEditLog(logFile);
    // Corrupting only the last txn loses just that txn; corrupting an
    // earlier one is skipped over and the rest still validates.
    long expectedEndTxId=(txId == (NUM_TXNS + 1)) ? NUM_TXNS : (NUM_TXNS + 1);
    assertEquals("Failed when corrupting txn opcode at " + txOffset,expectedEndTxId,validation.getEndTxId());
    assertTrue(!validation.hasCorruptHeader());
  }
  // Pass 2: truncate at each transaction's offset; everything from that
  // txn onward is lost.
  for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
    long txOffset=entry.getKey();
    long txId=entry.getValue();
    Files.copy(logFileBak,logFile);
    truncateFile(logFile,txOffset);
    validation=EditLogFileInputStream.validateEditLog(logFile);
    long expectedEndTxId=(txId == 0) ? HdfsConstants.INVALID_TXID : (txId - 1);
    assertEquals("Failed when corrupting txid " + txId + " txn opcode "+ "at "+ txOffset,expectedEndTxId,validation.getEndTxId());
    assertTrue(!validation.hasCorruptHeader());
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFSImage

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Ensure that the digest written by the saver equals to the digest of the
 * file.
 */
@Test public void testDigest() throws IOException {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs=cluster.getFileSystem();
    // Force a checkpoint so a fresh fsimage (and its stored MD5) is written.
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir=FSImageTestUtil.getNameNodeCurrentDirs(cluster,0).get(0);
    File fsimage=FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
    // The stored .md5 side file must match a recomputed digest of the image.
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Ensure mtime and atime can be loaded from fsimage: records the times
 * of a file, a directory, and a symlink, checkpoints, restarts the
 * cluster from the saved image, and verifies the times survived.
 */
@Test(timeout=60000) public void testLoadMtimeAtime() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs=cluster.getFileSystem();
    String userDir=hdfs.getHomeDirectory().toUri().getPath().toString();
    Path file=new Path(userDir,"file");
    Path dir=new Path(userDir,"/dir");
    Path link=new Path(userDir,"/link");
    hdfs.createNewFile(file);
    hdfs.mkdirs(dir);
    hdfs.createSymlink(file,link,false);
    // Capture the timestamps before the checkpoint.
    long mtimeFile=hdfs.getFileStatus(file).getModificationTime();
    long atimeFile=hdfs.getFileStatus(file).getAccessTime();
    long mtimeDir=hdfs.getFileStatus(dir).getModificationTime();
    long mtimeLink=hdfs.getFileLinkStatus(link).getModificationTime();
    long atimeLink=hdfs.getFileLinkStatus(link).getAccessTime();
    // Checkpoint so the inodes are persisted into a new fsimage.
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    hdfs.saveNamespace();
    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    cluster.shutdown();
    // Restart without formatting: state must come from the saved image.
    cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(1).build();
    cluster.waitActive();
    hdfs=cluster.getFileSystem();
    assertEquals(mtimeFile,hdfs.getFileStatus(file).getModificationTime());
    assertEquals(atimeFile,hdfs.getFileStatus(file).getAccessTime());
    assertEquals(mtimeDir,hdfs.getFileStatus(dir).getModificationTime());
    assertEquals(mtimeLink,hdfs.getFileLinkStatus(link).getModificationTime());
    assertEquals(atimeLink,hdfs.getFileLinkStatus(link).getAccessTime());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFSImageStorageInspector

InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Simple test with image, edits, and inprogress edits.
 */
@Test public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector=new FSImageTransactionalStorageInspector();
  // Mock directory containing two images (txids 123 and 456), one
  // finalized edits segment, and one in-progress segment starting at 457.
  StorageDirectory mockDir=FSImageTestUtil.mockStorageDirectory(NameNodeDirType.IMAGE_AND_EDITS,false,"/foo/current/" + getImageFileName(123),"/foo/current/" + getFinalizedEditsFileName(123,456),"/foo/current/" + getImageFileName(456),"/foo/current/" + getInProgressEditsFileName(457));
  inspector.inspectDirectory(mockDir);
  assertEquals(2,inspector.foundImages.size());
  // The image with the highest txid (456) must be chosen as latest.
  FSImageFile latestImage=inspector.getLatestImages().get(0);
  assertEquals(456,latestImage.txId);
  assertSame(mockDir,latestImage.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File("/foo/current/" + getImageFileName(456)),latestImage.getFile());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFSImageWithSnapshot

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test when there is snapshot taken on root: the snapshot must survive
 * an edit-log restart, a checkpoint restart, and a restart after a
 * second checkpoint.
 */
@Test public void testSnapshotOnRoot() throws Exception {
  final Path root=new Path("/");
  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root,"s1");
  // Restart 1: snapshot replayed from the edit log.
  cluster.shutdown();
  cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn=cluster.getNamesystem();
  hdfs=cluster.getFileSystem();
  // Checkpoint, then restart 2: snapshot loaded from the fsimage.
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn=cluster.getNamesystem();
  hdfs=cluster.getFileSystem();
  INodeDirectory rootNode=fsn.dir.getINode4Write(root.toString()).asDirectory();
  assertTrue("The children list of root should be empty",rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  // One snapshot diff must have been recorded for "s1".
  // NOTE(review): element type inferred from getSnapshotId() usage —
  // confirm it is DirectoryDiff (the extracted source had a raw List).
  List<DirectoryDiff> diffList=rootNode.getDiffs().asList();
  assertEquals(1,diffList.size());
  Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
  assertEquals(s1.getId(),diffList.get(0).getSnapshotId());
  assertEquals(1,fsn.getSnapshotManager().getNumSnapshottableDirs());
  SnapshottableDirectoryStatus[] sdirs=fsn.getSnapshotManager().getSnapshottableDirListing(null);
  assertEquals(root,sdirs[0].getFullPath());
  // Checkpoint again and restart 3: still loadable.
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.shutdown();
  cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn=cluster.getNamesystem();
  hdfs=cluster.getFileSystem();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test fsimage loading when 1) there is an empty file loaded from fsimage,
 * and 2) there is later an append operation to be applied from edit log.
 */
@Test(timeout=60000) public void testLoadImageWithEmptyFile() throws Exception {
  // Create an empty file and checkpoint it into the fsimage.
  Path file=new Path(dir,"file");
  FSDataOutputStream out=hdfs.create(file);
  out.close();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  // Append one byte; this lands only in the edit log, not the image.
  out=hdfs.append(file);
  out.write(1);
  out.close();
  // Restart without formatting: loads the image, then replays the append.
  cluster.shutdown();
  cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  hdfs=cluster.getFileSystem();
  FileStatus status=hdfs.getFileStatus(file);
  assertEquals(1,status.getLen());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFSNamesystem

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * FSNamesystem#clear must discard every lease held by the lease manager.
 */
@Test public void testFSNamespaceClearLeases() throws Exception {
  // Stand up a freshly-formatted namesystem on a dedicated name dir.
  File nameDir=new File(MiniDFSCluster.getBaseDirectory(),"name");
  Configuration conf=new HdfsConfiguration();
  conf.set(DFS_NAMENODE_NAME_DIR_KEY,nameDir.getAbsolutePath());
  NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem namesystem=FSNamesystem.loadFromDisk(conf);
  // Register exactly one lease...
  LeaseManager leases=namesystem.getLeaseManager();
  leases.addLease("client1","importantFile");
  assertEquals(1,leases.countLease());
  // ...then clear the namespace and confirm the lease is gone.
  namesystem.clear();
  leases=namesystem.getLeaseManager();
  assertEquals(0,leases.countLease());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests that the namenode edits dirs are gotten with duplicates removed:
 * three configured URIs, two of them identical, must yield two entries.
 * (Restored the Collection<URI> type parameter lost in extraction;
 * getNamespaceEditsDirs returns edit-directory URIs.)
 */
@Test public void testUniqueEditDirs() throws IOException {
  Configuration config=new Configuration();
  // "edits/dir1" appears twice (once with surrounding whitespace).
  config.set(DFS_NAMENODE_EDITS_DIR_KEY,"file://edits/dir, " + "file://edits/dir1,file://edits/dir1");
  Collection<URI> editsDirs=FSNamesystem.getNamespaceEditsDirs(config);
  assertEquals(2,editsDirs.size());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises FSNamesystemLock's reentrant read and write locks, checking
 * the hold counts and write-ownership flag around every lock/unlock.
 */
@Test public void testFSNamesystemLockCompatibility(){
  FSNamesystemLock lock=new FSNamesystemLock(true);
  // Read lock: acquire twice (reentrant), then release twice; the hold
  // count must track each step exactly.
  assertEquals(0,lock.getReadHoldCount());
  lock.readLock().lock();
  assertEquals(1,lock.getReadHoldCount());
  lock.readLock().lock();
  assertEquals(2,lock.getReadHoldCount());
  lock.readLock().unlock();
  assertEquals(1,lock.getReadHoldCount());
  lock.readLock().unlock();
  assertEquals(0,lock.getReadHoldCount());
  // Write lock: same reentrancy checks plus current-thread ownership.
  assertFalse(lock.isWriteLockedByCurrentThread());
  assertEquals(0,lock.getWriteHoldCount());
  lock.writeLock().lock();
  assertTrue(lock.isWriteLockedByCurrentThread());
  assertEquals(1,lock.getWriteHoldCount());
  lock.writeLock().lock();
  assertTrue(lock.isWriteLockedByCurrentThread());
  assertEquals(2,lock.getWriteHoldCount());
  lock.writeLock().unlock();
  assertTrue(lock.isWriteLockedByCurrentThread());
  assertEquals(1,lock.getWriteHoldCount());
  lock.writeLock().unlock();
  assertFalse(lock.isWriteLockedByCurrentThread());
  assertEquals(0,lock.getWriteHoldCount());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFavoredNodesEndToEnd

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Tests favored-node hints when one hinted datanode is not usable: the
 * first node is marked decommissioned before the create, so every
 * written replica must land on one of the other favored hosts.
 * (Cleanups: removed a stray empty statement after the first assert,
 * Java-style array declaration, braced the searching for-loop body.)
 */
@Test(timeout=180000) public void testWhenSomeNodesAreNotGood() throws Exception {
  final InetSocketAddress[] addrs=new InetSocketAddress[4];
  final String[] hosts=new String[addrs.length];
  for (int i=0; i < addrs.length; i++) {
    addrs[i]=datanodes.get(i).getXferAddress();
    hosts[i]=addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
  }
  // Make the first favored node ineligible for block placement.
  DatanodeInfo d=cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager().getDatanodeByXferAddr(addrs[0].getAddress().getHostAddress(),addrs[0].getPort());
  d.setDecommissioned();
  Path p=new Path("/filename-foo-bar-baz");
  final short replication=(short)3;
  FSDataOutputStream out=dfs.create(p,FsPermission.getDefault(),true,4096,replication,4096L,null,addrs);
  out.write(SOME_BYTES);
  out.close();
  d.stopDecommission();
  BlockLocation[] locations=getBlockLocations(p);
  Assert.assertEquals(replication,locations[0].getNames().length);
  for (int i=0; i < replication; i++) {
    final String loc=locations[0].getNames()[i];
    // Linear-search loc in the favored-host list; j == 0 would mean the
    // decommissioned node was used, j == hosts.length that it's missing.
    int j=0;
    for (; j < hosts.length && !loc.equals(hosts[j]); j++) {
      // advance j until loc is found or the list is exhausted
    }
    Assert.assertTrue("j=" + j,j > 0);
    Assert.assertTrue("loc=" + loc + " not in host list "+ Arrays.asList(hosts)+ ", j="+ j,j < hosts.length);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestFileJournalManager

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that in-progress files are handled correctly. Set up a single
 * edits directory and fail after the last roll, then verify that the
 * logs have the expected number of transactions.
 */
@Test public void testInprogressRecovery() throws IOException {
  File f=new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
  // 5 rolls, with an abort on the last segment of directory 0, leaving
  // a partially-written in-progress file behind.
  NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),5,new AbortSpec(5,0));
  StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager jm=new FileJournalManager(conf,sd,storage);
  // Expect all finalized txns plus those written before the failure.
  assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure that in-progress streams aren't counted if we don't ask for
 * them.
 */
@Test public void testExcludeInProgressStreams() throws CorruptionException, IOException {
  File f=new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
  // 10 rolls with no failures: 10 finalized segments + 1 in-progress.
  NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,false);
  StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager jm=new FileJournalManager(conf,sd,storage);
  // With inProgressOk=false only the 100 finalized txns are counted.
  assertEquals(100,getNumberOfTransactions(jm,1,false,false));
  // A stream started mid-log (txid 90) must also stop at the last
  // finalized transaction when in-progress segments are excluded.
  EditLogInputStream elis=getJournalInputStream(jm,90,false);
  try {
    FSEditLogOp lastReadOp=null;
    while ((lastReadOp=elis.readOp()) != null) {
      assertTrue(lastReadOp.getTransactionId() <= 100);
    }
  } finally {
    IOUtils.cleanup(LOG,elis);
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that we receive the correct number of transactions when we count
 * the number of transactions around gaps.
 * Set up a single edits directory, with no failures. Delete the 4th logfile.
 * Test that getNumberOfTransactions returns the correct number of
 * transactions before this gap and after this gap. Also verify that if you
 * try to count on the gap that an exception is thrown.
 */
@Test public void testManyLogsWithGaps() throws IOException {
  File f=new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
  NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10);
  StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next();
  // The gap is the 4th segment (txids [3*TXNS_PER_ROLL+1, 4*TXNS_PER_ROLL]).
  final long startGapTxId=3 * TXNS_PER_ROLL + 1;
  final long endGapTxId=4 * TXNS_PER_ROLL;
  File[] files=new File(f,"current").listFiles(new FilenameFilter(){
    @Override public boolean accept(File dir, String name){
      if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,endGapTxId))) {
        return true;
      }
      return false;
    }
  });
  assertEquals(1,files.length);
  assertTrue(files[0].delete());
  FileJournalManager jm=new FileJournalManager(conf,sd,storage);
  // Counting from txid 1 stops right before the gap.
  assertEquals(startGapTxId - 1,getNumberOfTransactions(jm,1,true,true));
  // Counting starting inside the gap finds nothing.
  assertEquals(0,getNumberOfTransactions(jm,startGapTxId,true,true));
  // Counting after the gap sees the remaining segments.
  assertEquals(11 * TXNS_PER_ROLL - endGapTxId,getNumberOfTransactions(jm,endGapTxId + 1,true,true));
}

EqualityVerifier 
// NOTE(review): mocked storage dir with finalized segments [1,100],[101,200],
// an in-progress segment at 201 and a later [1001,1100]; verifies the remote
// edit-log listing returned from several starting txids.
@Test public void testGetRemoteEditLog() throws IOException { StorageDirectory sd=FSImageTestUtil.mockStorageDirectory(NameNodeDirType.EDITS,false,NNStorage.getFinalizedEditsFileName(1,100),NNStorage.getFinalizedEditsFileName(101,200),NNStorage.getInProgressEditsFileName(201),NNStorage.getFinalizedEditsFileName(1001,1100)); FileJournalManager fjm=new FileJournalManager(conf,sd,null); assertEquals("[1,100],[101,200],[1001,1100]",getLogsAsString(fjm,1)); assertEquals("[101,200],[1001,1100]",getLogsAsString(fjm,101)); assertEquals("[101,200],[1001,1100]",getLogsAsString(fjm,150)); assertEquals("[1001,1100]",getLogsAsString(fjm,201)); assertEquals("Asking for a newer log than exists should return empty list","",getLogsAsString(fjm,9999)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): 3 edit dirs, no failures; each journal must report
// 6 * TXNS_PER_ROLL transactions (10 txns per roll, 5 rolls plus the final segment).
/** * Test the normal operation of loading transactions from * file journal manager. 3 edits directories are setup without any * failures. Test that we read in the expected number of transactions. */ @Test public void testNormalOperation() throws IOException { File f1=new File(TestEditLog.TEST_DIR + "/normtest0"); File f2=new File(TestEditLog.TEST_DIR + "/normtest1"); File f3=new File(TestEditLog.TEST_DIR + "/normtest2"); List editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI()); NNStorage storage=setupEdits(editUris,5); long numJournals=0; for ( StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) { FileJournalManager jm=new FileJournalManager(conf,sd,storage); assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false)); numJournals++; } assertEquals(3,numJournals); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): requests streams starting at every txid, including mid-segment
// ones; the remaining count must always be TOTAL_TXIDS - txid + 1.
/** * Make requests with starting transaction ids which don't match the beginning * txid of some log segments. * This should succeed. */ @Test public void testAskForTransactionsMidfile() throws IOException { File f=new File(TestEditLog.TEST_DIR + "/askfortransactionsmidfile"); NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10); StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next(); FileJournalManager jm=new FileJournalManager(conf,sd,storage); final int TOTAL_TXIDS=10 * 11; for (int txid=1; txid <= TOTAL_TXIDS; txid++) { assertEquals((TOTAL_TXIDS - txid) + 1,getNumberOfTransactions(jm,txid,true,false)); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that we can load an edits directory with a corrupt inprogress file.
 * The corrupt inprogress file should be moved to the side.
 */
@Test
public void testManyLogsWithCorruptInprogress() throws IOException {
  File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
  // 10 rolls; aborting roll 10 in directory 0 leaves an inprogress segment.
  NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), 10,
      new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  // Locate the single inprogress edits file left behind by the abort.
  File[] files = new File(f, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith("edits_inprogress");
    }
  });
  // FIX: expected value first so a mismatch reports "expected:<1>" correctly.
  assertEquals(1, files.length);

  corruptAfterStartSegment(files[0]);

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  // The corrupt inprogress segment contributes only its START_LOG_SEGMENT txn.
  assertEquals(10 * TXNS_PER_ROLL + 1,
      getNumberOfTransactions(jm, 1, true, false));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): all 3 dirs abort on the last roll; each journal must still load
// 5 full rolls plus the partial (TXNS_PER_FAIL) transactions.
/** * Test that FileJournalManager behaves correctly despite inprogress * files in all its edit log directories. Set up 3 directories and fail * all on the last roll. Verify that the correct number of transaction * are then loaded. */ @Test public void testInprogressRecoveryAll() throws IOException { File f1=new File(TestEditLog.TEST_DIR + "/failalltest0"); File f2=new File(TestEditLog.TEST_DIR + "/failalltest1"); File f3=new File(TestEditLog.TEST_DIR + "/failalltest2"); List editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI()); NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,0),new AbortSpec(5,1),new AbortSpec(5,2)); Iterator dirs=storage.dirIterator(NameNodeDirType.EDITS); StorageDirectory sd=dirs.next(); FileJournalManager jm=new FileJournalManager(conf,sd,storage); assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false)); sd=dirs.next(); jm=new FileJournalManager(conf,sd,storage); assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false)); sd=dirs.next(); jm=new FileJournalManager(conf,sd,storage); assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): single dir aborted on the final roll; verifies the total txn
// count and the remaining count when skipping the first 3 rolls.
/** * Test that we can read from a stream created by FileJournalManager. * Create a single edits directory, failing it on the final roll. * Then try loading from the point of the 3rd roll. Verify that we read * the correct number of transactions from this point. */ @Test public void testReadFromStream() throws IOException { File f=new File(TestEditLog.TEST_DIR + "/readfromstream"); NNStorage storage=setupEdits(Collections.singletonList(f.toURI()),10,new AbortSpec(10,0)); StorageDirectory sd=storage.dirIterator(NameNodeDirType.EDITS).next(); FileJournalManager jm=new FileJournalManager(conf,sd,storage); long expectedTotalTxnCount=TXNS_PER_ROLL * 10 + TXNS_PER_FAIL; assertEquals(expectedTotalTxnCount,getNumberOfTransactions(jm,1,true,false)); long skippedTxns=(3 * TXNS_PER_ROLL); long startingTxId=skippedTxns + 1; long numLoadable=getNumberOfTransactions(jm,startingTxId,true,false); assertEquals(expectedTotalTxnCount - skippedTxns,numLoadable); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Make sure that we start reading the correct op when we request a stream
 * with a txid in the middle of an edit log file.
 */
@Test
public void testReadFromMiddleOfEditLog() throws CorruptionException,
    IOException {
  File f = new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
  NNStorage storage = setupEdits(Collections.singletonList(f.toURI()), 10);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();

  FileJournalManager jm = new FileJournalManager(conf, sd, storage);

  // Ask for a stream positioned at txid 5, in the middle of the first segment.
  EditLogInputStream elis = getJournalInputStream(jm, 5, true);
  try {
    FSEditLogOp op = elis.readOp();
    // FIX: JUnit's signature is (message, expected, actual); the original
    // passed the actual value before the expected constant.
    assertEquals("read unexpected op", 5, op.getTransactionId());
  } finally {
    IOUtils.cleanup(LOG, elis);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// NOTE(review): only dir 1 aborts on the last roll; it reports 5 rolls +
// TXNS_PER_FAIL while dirs 0 and 2 report 6 full rolls.
/** * Test a mixture of inprogress files and finalised. Set up 3 edits * directories and fail the second on the last roll. Verify that reading * the transactions, reads from the finalised directories. */ @Test public void testInprogressRecoveryMixed() throws IOException { File f1=new File(TestEditLog.TEST_DIR + "/mixtest0"); File f2=new File(TestEditLog.TEST_DIR + "/mixtest1"); File f3=new File(TestEditLog.TEST_DIR + "/mixtest2"); List editUris=ImmutableList.of(f1.toURI(),f2.toURI(),f3.toURI()); NNStorage storage=setupEdits(editUris,5,new AbortSpec(5,1)); Iterator dirs=storage.dirIterator(NameNodeDirType.EDITS); StorageDirectory sd=dirs.next(); FileJournalManager jm=new FileJournalManager(conf,sd,storage); assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false)); sd=dirs.next(); jm=new FileJournalManager(conf,sd,storage); assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL,getNumberOfTransactions(jm,1,true,false)); sd=dirs.next(); jm=new FileJournalManager(conf,sd,storage); assertEquals(6 * TXNS_PER_ROLL,getNumberOfTransactions(jm,1,true,false)); }

Class: org.apache.hadoop.hdfs.server.namenode.TestFsck

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if fsck can return -1 in case of failure.
 * @throws Exception
 */
@Test
public void testFsckError() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // Bring up a one-node cluster.
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();

    // Create a one-block file.
    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);

    // Intentionally corrupt NN data structure so fsck itself fails.
    INodeFile node =
        (INodeFile) cluster.getNamesystem().dir.getNode(fileName, true);
    final BlockInfo[] blocks = node.getBlocks();
    // FIX: expected value first so a mismatch reports "expected:<1>".
    assertEquals(1, blocks.length);
    blocks[0].setNumBytes(-1L);  // negative length makes fsck fail

    // fsck must exit with -1 and print FAILURE_STATUS.
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));

    // Clean up the file system.
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the # of missing block replicas and expected replicas is correct.
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor.
  final short REPL_FACTOR = 2;
  // Only one datanode is available, so some replicas will be missing.
  final short NUM_REPLICAS = 1;
  // Number of blocks to write.
  final short NUM_BLOCKS = 3;
  // Set a small-enough block size.
  final long blockSize = 512;
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    // Startup a minicluster.
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    // Create a file that will be intentionally under-replicated.
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);

    // Create an under-replicated file.
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map pmap = new HashMap();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
        NUM_REPLICAS, (short) 1, remoteAddress);

    // Run the fsck and check the Result.
    final HdfsFileStatus file =
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    System.out.println(result.toString());
    // FIX: expected values first so mismatches are reported correctly.
    assertEquals((NUM_BLOCKS * REPL_FACTOR) - (NUM_BLOCKS * NUM_REPLICAS),
        res.missingReplicas);
    assertEquals(NUM_BLOCKS * REPL_FACTOR, res.numExpectedReplicas);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): runs fsck on a healthy cluster (expects HEALTHY, unchanged
// atime, audit-logged), then with all datanodes down (expects CORRUPT), then
// restores the datanodes and cleans up; kept byte-identical because the
// cluster teardown/rebuild order is deliberate.
/** * do fsck */ @Test public void testFsck() throws Exception { DFSTestUtil util=new DFSTestUtil.Builder().setName("TestFsck").setNumFiles(20).build(); MiniDFSCluster cluster=null; FileSystem fs=null; try { Configuration conf=new HdfsConfiguration(); final long precision=1L; conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,precision); conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,10000L); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(4).build(); fs=cluster.getFileSystem(); final String fileName="/srcdat"; util.createFiles(fs,fileName); util.waitReplication(fs,fileName,(short)3); final Path file=new Path(fileName); long aTime=fs.getFileStatus(file).getAccessTime(); Thread.sleep(precision); setupAuditLogs(); String outStr=runFsck(conf,0,true,"/"); verifyAuditLogs(); assertEquals(aTime,fs.getFileStatus(file).getAccessTime()); System.out.println(outStr); assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); if (fs != null) { try { fs.close(); } catch ( Exception e) { } } cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build(); outStr=runFsck(conf,1,true,"/"); assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS)); System.out.println(outStr); cluster.startDataNodes(conf,4,true,null,null); cluster.waitActive(); cluster.waitClusterUp(); fs=cluster.getFileSystem(); util.cleanup(fs,"/srcdat"); } finally { if (fs != null) { try { fs.close(); } catch ( Exception e) { } } if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the # of misreplaced replicas is correct.
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor.
  final short REPL_FACTOR = 2;
  // Two datanodes on the same rack, so every block is mis-replicated once a
  // second rack becomes known to the topology.
  short NUM_DN = 2;
  final short NUM_BLOCKS = 3;
  // Set a small-enough block size.
  final long blockSize = 512;
  String[] racks = {"/rack1", "/rack1"};
  String[] hosts = {"host1", "host2"};
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    // Startup a minicluster.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN)
        .hosts(hosts).racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    // Create a file with replicas that will all land on one rack.
    final String pathString = new String("/testfile");
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);

    // Make the topology aware of a second rack so the single-rack placement
    // above counts as mis-replication.
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;

    Map pmap = new HashMap();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
        NUM_DN, REPL_FACTOR, remoteAddress);

    // Run the fsck and check the Result.
    final HdfsFileStatus file =
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // FIX: expected value first so a mismatch reports correctly.
    assertEquals(NUM_BLOCKS, res.numMisReplicatedBlocks);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Run fsck against a path that does not exist; the output must not claim
 * the filesystem is healthy.
 */
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck")
      .setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short) 3);

    String outStr = runFsck(conf, 0, true, "/non-existent");
    // FIX: direct containment check is clearer than comparing indexOf() to -1.
    assertFalse(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test fsck with symlinks in the filesystem.
 */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder()
      .setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
        precision);
    // FIX: removed a duplicate setLong of DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
    // it is already set to the same value above.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc =
        FileContext.getFileContext(cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short) 3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    // fsck must not have touched the symlink's access time.
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
      }
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): boots a cluster whose edits dir uses the dummy JournalManager
// plugin and checks the plugin's static hooks (shouldPrompt/format/conf/uri/
// nsInfo) were populated during NN startup.
/** * Test that a dummy implementation of JournalManager can * be initialized on startup */ @Test public void testDummyJournalManager() throws Exception { MiniDFSCluster cluster=null; Configuration conf=new Configuration(); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",DummyJournalManager.class.getName()); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,DUMMY_URI); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,0); try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); cluster.waitActive(); assertTrue(DummyJournalManager.shouldPromptCalled); assertTrue(DummyJournalManager.formatCalled); assertNotNull(DummyJournalManager.conf); assertEquals(new URI(DUMMY_URI),DummyJournalManager.uri); assertNotNull(DummyJournalManager.nsInfo); assertEquals(DummyJournalManager.nsInfo.getClusterID(),cluster.getNameNode().getNamesystem().getClusterId()); } finally { if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.TestHDFSConcat

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): concat of 3 source files into a target must survive an
// edit-log replay on NN restart with the same modification time; sources
// must be gone afterwards.
/** * Test that the concat operation is properly persisted in the * edit log, and properly replayed on restart. */ @Test public void testConcatInEditLog() throws Exception { final Path TEST_DIR=new Path("/testConcatInEditLog"); final long FILE_LEN=blockSize; Path[] srcFiles=new Path[3]; for (int i=0; i < srcFiles.length; i++) { Path path=new Path(TEST_DIR,"src-" + i); DFSTestUtil.createFile(dfs,path,FILE_LEN,REPL_FACTOR,1); srcFiles[i]=path; } Path targetFile=new Path(TEST_DIR,"target"); DFSTestUtil.createFile(dfs,targetFile,FILE_LEN,REPL_FACTOR,1); dfs.concat(targetFile,srcFiles); assertTrue(dfs.exists(targetFile)); FileStatus origStatus=dfs.getFileStatus(targetFile); cluster.restartNameNode(true); assertTrue(dfs.exists(targetFile)); assertFalse(dfs.exists(srcFiles[0])); FileStatus statusAfterRestart=dfs.getFileStatus(targetFile); assertEquals(origStatus.getModificationTime(),statusAfterRestart.getModificationTime()); }

IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): kept byte-identical — a long multi-phase scenario (permission
// failure as another user, quota/file-count accounting, block counts, re-create
// of sources, small-file concat) whose statement order is load-bearing. Several
// assertEquals calls pass the actual value before the expected one, which only
// affects failure messages, not the checks themselves.
/** * Concatenates 10 files into one * Verifies the final size, deletion of the file, number of blocks * @throws IOException */ @Test public void testConcat() throws IOException, InterruptedException { final int numFiles=10; long fileLen=blockSize * 3; HdfsFileStatus fStatus; FSDataInputStream stm; String trg=new String("/trg"); Path trgPath=new Path(trg); DFSTestUtil.createFile(dfs,trgPath,fileLen,REPL_FACTOR,1); fStatus=nn.getFileInfo(trg); long trgLen=fStatus.getLen(); long trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount(); Path[] files=new Path[numFiles]; byte[][] bytes=new byte[numFiles][(int)fileLen]; LocatedBlocks[] lblocks=new LocatedBlocks[numFiles]; long[] lens=new long[numFiles]; int i=0; for (i=0; i < files.length; i++) { files[i]=new Path("/file" + i); Path path=files[i]; System.out.println("Creating file " + path); DFSTestUtil.createFile(dfs,path,fileLen,REPL_FACTOR,1); fStatus=nn.getFileInfo(path.toUri().getPath()); lens[i]=fStatus.getLen(); assertEquals(trgLen,lens[i]); lblocks[i]=nn.getBlockLocations(path.toUri().getPath(),0,lens[i]); stm=dfs.open(path); stm.readFully(0,bytes[i]); stm.close(); } final UserGroupInformation user1=UserGroupInformation.createUserForTesting("theDoctor",new String[]{"tardis"}); DistributedFileSystem hdfs=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(user1,conf); try { hdfs.concat(trgPath,files); fail("Permission exception expected"); } catch ( IOException ie) { System.out.println("Got expected exception for permissions:" + ie.getLocalizedMessage()); } ContentSummary cBefore=dfs.getContentSummary(trgPath.getParent()); dfs.concat(trgPath,files); ContentSummary cAfter=dfs.getContentSummary(trgPath.getParent()); assertEquals(cBefore.getFileCount(),cAfter.getFileCount() + files.length); long totalLen=trgLen; long totalBlocks=trgBlocks; for (i=0; i < files.length; i++) { totalLen+=lens[i]; totalBlocks+=lblocks[i].locatedBlockCount(); } System.out.println("total len=" + totalLen + "; totalBlocks="+ 
totalBlocks); fStatus=nn.getFileInfo(trg); trgLen=fStatus.getLen(); stm=dfs.open(trgPath); byte[] byteFileConcat=new byte[(int)trgLen]; stm.readFully(0,byteFileConcat); stm.close(); trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount(); assertEquals(trgBlocks,totalBlocks); assertEquals(trgLen,totalLen); for ( Path p : files) { fStatus=nn.getFileInfo(p.toUri().getPath()); assertNull("File " + p + " still exists",fStatus); DFSTestUtil.createFile(dfs,p,fileLen,REPL_FACTOR,1); } checkFileContent(byteFileConcat,bytes); Path smallFile=new Path("/sfile"); int sFileLen=10; DFSTestUtil.createFile(dfs,smallFile,sFileLen,REPL_FACTOR,1); dfs.concat(trgPath,new Path[]{smallFile}); fStatus=nn.getFileInfo(trg); trgLen=fStatus.getLen(); trgBlocks=nn.getBlockLocations(trg,0,trgLen).locatedBlockCount(); assertEquals(trgBlocks,totalBlocks + 1); assertEquals(trgLen,totalLen + sFileLen); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Concat a target with a source file whose length is not a multiple of the
 * block size; verifies block count, total length, removal of the source and
 * the resulting byte content.
 */
@Test
public void testConcatNotCompleteBlock() throws IOException {
  long trgFileLen = blockSize * 3;
  long srcFileLen = blockSize * 3 + 20;  // src is not at a block boundary

  // Create the target file.
  String name1 = "/trg", name2 = "/src";
  Path filePath1 = new Path(name1);
  DFSTestUtil.createFile(dfs, filePath1, trgFileLen, REPL_FACTOR, 1);
  HdfsFileStatus fStatus = nn.getFileInfo(name1);
  long fileLen = fStatus.getLen();
  // FIX: expected value first in assertEquals.
  assertEquals(trgFileLen, fileLen);

  // Read the target file's contents.
  FSDataInputStream stm = dfs.open(filePath1);
  byte[] byteFile1 = new byte[(int) trgFileLen];
  stm.readFully(0, byteFile1);
  stm.close();
  LocatedBlocks lb1 = nn.getBlockLocations(name1, 0, trgFileLen);

  // Create the source file.
  Path filePath2 = new Path(name2);
  DFSTestUtil.createFile(dfs, filePath2, srcFileLen, REPL_FACTOR, 1);
  fStatus = nn.getFileInfo(name2);
  fileLen = fStatus.getLen();
  assertEquals(srcFileLen, fileLen);

  // Read the source file's contents.
  stm = dfs.open(filePath2);
  byte[] byteFile2 = new byte[(int) srcFileLen];
  stm.readFully(0, byteFile2);
  stm.close();
  LocatedBlocks lb2 = nn.getBlockLocations(name2, 0, srcFileLen);

  System.out.println("trg len=" + trgFileLen + "; src len=" + srcFileLen);

  // Move the blocks of src into trg.
  dfs.concat(filePath1, new Path[] {filePath2});

  long totalLen = trgFileLen + srcFileLen;
  fStatus = nn.getFileInfo(name1);
  fileLen = fStatus.getLen();

  // Read back the concatenated file.
  stm = dfs.open(filePath1);
  byte[] byteFileConcat = new byte[(int) fileLen];
  stm.readFully(0, byteFileConcat);
  stm.close();
  LocatedBlocks lbConcat = nn.getBlockLocations(name1, 0, fileLen);

  // Block count and length must both equal the sum of the two inputs.
  // FIX: expected value first in assertEquals.
  assertEquals(lb1.locatedBlockCount() + lb2.locatedBlockCount(),
      lbConcat.locatedBlockCount());
  System.out.println("file1 len=" + fileLen + "; total len=" + totalLen);
  assertEquals(totalLen, fileLen);

  // The source file must be gone.
  fStatus = nn.getFileInfo(name2);
  // FIX: the failure message was missing a space ("…still exists").
  assertNull("File " + name2 + " still exists", fStatus);

  // The content must be the concatenation of the two originals.
  checkFileContent(byteFileConcat, new byte[][] {byteFile1, byteFile2});
}

Class: org.apache.hadoop.hdfs.server.namenode.TestINodeFile

APIUtilityVerifier EqualityVerifier 
// NOTE(review): upper-bound check — BLKSIZE_MAXVALUE must round-trip through
// the INodeFile's preferred block size field.
@Test public void testPreferredBlockSizeUpperBound(){ replication=3; preferredBlockSize=BLKSIZE_MAXVALUE; INodeFile inf=createINodeFile(replication,preferredBlockSize); assertEquals("True has to be returned in this case",BLKSIZE_MAXVALUE,inf.getPreferredBlockSize()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): toggles a file into and out of under-construction state and
// checks the UC feature's client name/machine round-trip.
@Test public void testFileUnderConstruction(){ replication=3; final INodeFile file=new INodeFile(INodeId.GRANDFATHER_INODE_ID,null,perm,0L,0L,null,replication,1024L); assertFalse(file.isUnderConstruction()); final String clientName="client"; final String clientMachine="machine"; file.toUnderConstruction(clientName,clientMachine); assertTrue(file.isUnderConstruction()); FileUnderConstructionFeature uc=file.getFileUnderConstructionFeature(); assertEquals(clientName,uc.getClientName()); assertEquals(clientMachine,uc.getClientMachine()); file.toCompleteFile(Time.now()); assertFalse(file.isUnderConstruction()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): kept byte-identical — exercises most FileSystem operations
// through /.reserved/.inodes/<id> paths and checks they behave identically to
// the equivalent regular paths; the long call sequence is order-dependent.
/** * Tests for addressing files using /.reserved/.inodes/ in file system * operations. */ @Test public void testInodeIdBasedPaths() throws Exception { Configuration conf=new Configuration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY,true); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); DistributedFileSystem fs=cluster.getFileSystem(); NamenodeProtocols nnRpc=cluster.getNameNodeRpc(); Path baseDir=getInodePath(INodeId.ROOT_INODE_ID,"testInodeIdBasedPaths"); Path baseDirRegPath=new Path("/testInodeIdBasedPaths"); fs.mkdirs(baseDir); fs.exists(baseDir); long baseDirFileId=nnRpc.getFileInfo(baseDir.toString()).getFileId(); Path testFileInodePath=getInodePath(baseDirFileId,"test1"); Path testFileRegularPath=new Path(baseDir,"test1"); final int testFileBlockSize=1024; FileSystemTestHelper.createFile(fs,testFileInodePath,1,testFileBlockSize); assertTrue(fs.exists(testFileInodePath)); FsPermission perm=new FsPermission((short)0666); fs.setPermission(testFileInodePath,perm); FileStatus fileStatus=fs.getFileStatus(testFileInodePath); assertEquals(perm,fileStatus.getPermission()); fs.setOwner(testFileInodePath,fileStatus.getOwner(),fileStatus.getGroup()); fs.setTimes(testFileInodePath,0,0); fileStatus=fs.getFileStatus(testFileInodePath); assertEquals(0,fileStatus.getModificationTime()); assertEquals(0,fileStatus.getAccessTime()); fs.setReplication(testFileInodePath,(short)3); fileStatus=fs.getFileStatus(testFileInodePath); assertEquals(3,fileStatus.getReplication()); fs.setReplication(testFileInodePath,(short)1); assertEquals(testFileBlockSize,nnRpc.getPreferredBlockSize(testFileInodePath.toString())); { fs.isFileClosed(testFileInodePath); fs.getAclStatus(testFileInodePath); fs.getXAttrs(testFileInodePath); fs.listXAttrs(testFileInodePath); fs.access(testFileInodePath,FsAction.READ_WRITE); } 
String invalidTarget=new Path(baseDir,"invalidTarget").toString(); String link=new Path(baseDir,"link").toString(); testInvalidSymlinkTarget(nnRpc,invalidTarget,link); String validTarget="/validtarget"; testValidSymlinkTarget(nnRpc,validTarget,link); fs.append(testFileInodePath); fs.recoverLease(testFileInodePath); LocatedBlocks l1=nnRpc.getBlockLocations(testFileInodePath.toString(),0,Long.MAX_VALUE); LocatedBlocks l2=nnRpc.getBlockLocations(testFileRegularPath.toString(),0,Long.MAX_VALUE); checkEquals(l1,l2); Path renameDst=getInodePath(baseDirFileId,"test2"); fileStatus=fs.getFileStatus(testFileInodePath); fs.rename(testFileInodePath,renameDst); fs.rename(renameDst,testFileInodePath); assertEquals(fileStatus,fs.getFileStatus(testFileInodePath)); fs.rename(testFileInodePath,renameDst,Rename.OVERWRITE); fs.rename(renameDst,testFileInodePath,Rename.OVERWRITE); assertEquals(fileStatus,fs.getFileStatus(testFileInodePath)); assertEquals(fs.getContentSummary(testFileRegularPath).toString(),fs.getContentSummary(testFileInodePath).toString()); checkEquals(fs.listFiles(baseDirRegPath,false),fs.listFiles(baseDir,false)); fs.delete(testFileInodePath,true); assertFalse(fs.exists(testFileInodePath)); } finally { if (cluster != null) { cluster.shutdown(); } } }

APIUtilityVerifier EqualityVerifier 
// NOTE(review): replication-factor round-trip on a freshly created INodeFile.
/** * Test for the Replication value. Sets a value and checks if it was set * correct. */ @Test public void testReplication(){ replication=3; preferredBlockSize=128 * 1024 * 1024; INodeFile inf=createINodeFile(replication,preferredBlockSize); assertEquals("True has to be returned in this case",replication,inf.getFileReplication()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This test verifies inode ID counter and inode map functionality.
 */
@Test
public void testInodeId() throws IOException {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNamesystem();
    long lastId = fsn.getLastInodeId();

    // Initially the inode map contains only root, and the last allocated
    // inode ID is root's.
    int inodeCount = 1;
    long expectedLastInodeId = INodeId.ROOT_INODE_ID;
    // FIX: expected value first in assertEquals.
    assertEquals(INodeId.ROOT_INODE_ID, fsn.dir.rootDir.getId());
    assertEquals(expectedLastInodeId, lastId);
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // mkdir: last inode ID and inode map size both increase by 1.
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));
    assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());

    // create file: both increase by 1 again.
    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
    DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
    assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(++inodeCount, fsn.dir.getInodeMapSize());

    // The file status must carry the freshly allocated inode ID.
    HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
    assertEquals(expectedLastInodeId, fileStatus.getFileId());

    // rename: no new inodes allocated, map size unchanged.
    Path renamedPath = new Path("/test2");
    assertTrue(fs.rename(path, renamedPath));
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // delete: map shrinks by 2 (dir + file), last ID unchanged.
    assertTrue(fs.delete(renamedPath, true));
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Create /test1 with two files: 3 new inodes (dir, file1, file2).
    String file1 = "/test1/file1";
    String file2 = "/test1/file2";
    DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
    DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
    inodeCount += 3;
    expectedLastInodeId += 3;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());

    // concat removes file1's inode from the map.
    nnrpc.concat(file2, new String[] {file1});
    inodeCount--;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());

    // delete the tree: /test1 and file2 go away.
    assertTrue(fs.delete(new Path("/test1"), true));
    inodeCount -= 2;
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Counters must survive an edit-log replay on restart.
    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Creating /test2/file2 allocates 2 inodes (implicit dir + file).
    DFSTestUtil.createFile(fs, new Path("/test2/file2"), 1024, (short) 1, 0);
    expectedLastInodeId += 2;
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // A file under construction is counted immediately on create().
    FSDataOutputStream outStream = fs.create(new Path("/test3/file"));
    // FIX: assertNotNull is the idiomatic form of assertTrue(x != null).
    assertNotNull(outStream);
    expectedLastInodeId += 2;  // /test3 and /test3/file
    inodeCount += 2;
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());

    // Checkpoint while the file is still under construction, then verify the
    // counters also survive a restart from the saved image.
    fsn.enterSafeMode(false);
    fsn.saveNamespace();
    fsn.leaveSafeMode();
    outStream.close();

    cluster.restartNameNode();
    cluster.waitActive();
    fsn = cluster.getNamesystem();
    assertEquals(expectedLastInodeId, fsn.getLastInodeId());
    assertEquals(inodeCount, fsn.dir.getInodeMapSize());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Attach an XAttrFeature carrying a single user xattr to an INodeFile,
 * verify it is retrievable, then verify removal leaves no feature behind.
 */
@Test
public void testXAttrFeature() {
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);

  // Build a feature with one USER-namespace xattr (user.a1 = "123").
  ImmutableList.Builder builder = new ImmutableList.Builder();
  XAttr xAttr = new XAttr.Builder().setNameSpace(XAttr.NameSpace.USER)
      .setName("a1").setValue(new byte[] {0x31, 0x32, 0x33}).build();
  builder.add(xAttr);
  XAttrFeature f = new XAttrFeature(builder.build());
  inf.addXAttrFeature(f);
  XAttrFeature f1 = inf.getXAttrFeature();
  assertEquals(xAttr, f1.getXAttrs().get(0));

  // After removal the feature must be gone.
  inf.removeXAttrFeature();
  f1 = inf.getXAttrFeature();
  // FIX: assertNull is the idiomatic check; the original assertEquals(f1, null)
  // also had expected and actual reversed.
  assertNull(f1);
}

APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests for {@link FSDirectory#resolvePath(String,byte[][],FSDirectory)}.
 */
@Test
public void testInodePath() throws IOException {
  // Create inodes for /a/b/c and mock the directory so any inode lookup
  // returns the leaf inode "c".
  String path = "/a/b/c";
  INode inode = createTreeOfInodes(path);
  FSDirectory fsd = Mockito.mock(FSDirectory.class);
  Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());

  // A non-reserved path resolves to itself.
  assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
  byte[][] components = INode.getPathComponents(path);
  String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // An inode-ID path resolves to the inode's full path.
  components = INode.getPathComponents("/.reserved/.inodes/1");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // Same, with a trailing separator.
  components = INode.getPathComponents("/.reserved/.inodes/1/");
  // FIX: the original never recomputed resolvedPath for this case, so the
  // assertion below was checking the stale value from the previous case.
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals(path, resolvedPath);

  // Inode-ID path with a relative suffix appended.
  components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
  resolvedPath = FSDirectory.resolvePath(path, components, fsd);
  assertEquals("/a/b/c/d/e/f", resolvedPath);

  // "/.reserved/.inodes" alone is not an inode path.
  String testPath = "/.reserved/.inodes";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);

  // The root inode ID resolves to "/".
  testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals("/", resolvedPath);

  // A path that merely looks reserved is returned unchanged.
  testPath = "/.invalid/.inodes/1";
  components = INode.getPathComponents(testPath);
  resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
  assertEquals(testPath, resolvedPath);

  // An unknown inode ID must fail with FileNotFoundException.
  Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
  testPath = "/.reserved/.inodes/1234";
  components = INode.getPathComponents(testPath);
  try {
    String realPath = FSDirectory.resolvePath(testPath, components, fsd);
    fail("Path should not be resolved:" + realPath);
  } catch (IOException e) {
    assertTrue(e instanceof FileNotFoundException);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Checks that getFullPathName reflects an inode's current position as
 * it is linked under a directory and then under the root.
 */
@Test
public void testGetFullPathName() {
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile file = createINodeFile(replication, preferredBlockSize);
  file.setLocalName(DFSUtil.string2Bytes("f"));
  INodeDirectory rootDir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      INodeDirectory.ROOT_NAME, perm, 0L);
  INodeDirectory childDir = new INodeDirectory(INodeId.GRANDFATHER_INODE_ID,
      DFSUtil.string2Bytes("d"), perm, 0L);
  // An unlinked file reports only its own name.
  assertEquals("f", file.getFullPathName());
  // Linked under "d": the path stays relative until "d" has a parent.
  childDir.addChild(file);
  assertEquals("d" + Path.SEPARATOR + "f", file.getFullPathName());
  // Once rooted, all paths become absolute.
  rootDir.addChild(childDir);
  assertEquals(Path.SEPARATOR + "d" + Path.SEPARATOR + "f", file.getFullPathName());
  assertEquals(Path.SEPARATOR + "d", childDir.getFullPathName());
  assertEquals(Path.SEPARATOR, rootDir.getFullPathName());
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests that concatBlocks merges the blocks of the appended files into
 * the target file.
 */
@Test
public void testConcatBlocks() {
  INodeFile origFile = createINodeFiles(1, "origfile")[0];
  // JUnit convention: expected value first, actual second (the original
  // had them swapped, which produces misleading failure messages).
  assertEquals("Number of blocks didn't match", 1L, origFile.numBlocks());
  INodeFile[] appendFiles = createINodeFiles(4, "appendfile");
  origFile.concatBlocks(appendFiles);
  // 1 original block + 4 appended files of 1 block each.
  assertEquals("Number of blocks didn't match", 5L, origFile.numBlocks());
}

APIUtilityVerifier EqualityVerifier 
/**
 * Test for the PreferredBlockSize value. Sets a value and checks if it
 * was set correctly.
 */
@Test
public void testPreferredBlockSize() {
  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile inf = createINodeFile(replication, preferredBlockSize);
  // Message fixed: the old text ("True has to be returned in this case")
  // did not describe this equality assertion.
  assertEquals("preferredBlockSize should match the value the file was created with",
      preferredBlockSize, inf.getPreferredBlockSize());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();
    // Create a file under /dir and confirm its full path resolves.
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());
    // Setting a quota replaces the directory inode with a quota-aware
    // one; the child's full path must still resolve through it.
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());
    // Rename the quota'd directory and verify the child's new path.
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    fnode = fsdir.getINode(newFile.toString());
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestLargeDirectoryDelete

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Creates the test files, verifies all blocks are present, then runs
 * the concurrent deletion threads; the cluster is always shut down.
 */
@Test
public void largeDelete() throws Throwable {
  mc = new MiniDFSCluster.Builder(CONF).build();
  try {
    mc.waitActive();
    Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
    createFiles();
    // All blocks must exist before the deletion threads start.
    Assert.assertEquals(TOTAL_BLOCKS, getBlockCount());
    runThreads();
  } finally {
    mc.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestMetaSave

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests metasave: stops a datanode, raises replication on one file so
 * its blocks become under-replicated, then checks the header lines of
 * the metasave report.
 */
@Test
public void testMetaSave() throws IOException, InterruptedException {
  for (int i = 0; i < 2; i++) {
    Path file = new Path("/filestatus" + i);
    DFSTestUtil.createFile(fileSys, file, 1024, 1024, blockSize, (short) 2, seed);
  }
  cluster.stopDataNode(1);
  // Wait for the namenode to mark the stopped datanode dead.
  Thread.sleep(15000);
  namesystem.setReplication("/filestatus0", (short) 4);
  namesystem.metaSave("metasave.out.txt");
  FileInputStream fstream = new FileInputStream(getLogFile("metasave.out.txt"));
  DataInputStream in = new DataInputStream(fstream);
  BufferedReader reader = null;
  try {
    reader = new BufferedReader(new InputStreamReader(in));
    String line = reader.readLine();
    Assert.assertEquals("3 files and directories, 2 blocks = 5 total filesystem objects", line);
    // assertEquals replaces assertTrue(line.equals(...)): failures now
    // report both strings, and a null line cannot throw NPE.
    line = reader.readLine();
    Assert.assertEquals("Live Datanodes: 1", line);
    line = reader.readLine();
    Assert.assertEquals("Dead Datanodes: 1", line);
    line = reader.readLine();
    line = reader.readLine();
    assertTrue(line.matches("^/filestatus[01]:.*"));
  } finally {
    if (reader != null) reader.close();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNNStorageRetentionFunctional

EqualityVerifier 
/**
 * Test case where two directories are configured as NAME_AND_EDITS
 * and one of them fails to save storage. Since the edits and image
 * failure states are decoupled, the failure of image saving should
 * not prevent the purging of logs from that dir.
 */
@Test
public void testPurgingWithNameEditsDirAfterFailure() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  // Retain no extra edits so purging behavior is directly observable.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
  File sd0 = new File(TEST_ROOT_DIR, "nn0");
  File sd1 = new File(TEST_ROOT_DIR, "nn1");
  File cd0 = new File(sd0, "current");
  File cd1 = new File(sd1, "current");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(sd0, sd1));
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .manageNameDfsDirs(false).format(true).build();
    NameNode nn = cluster.getNameNode();
    doSaveNamespace(nn);
    LOG.info("After first save, images 0 and 2 should exist in both dirs");
    assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(0), getImageFileName(2));
    assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(0), getImageFileName(2));
    assertGlobEquals(cd0, "edits_.*",
        getFinalizedEditsFileName(1, 2), getInProgressEditsFileName(3));
    assertGlobEquals(cd1, "edits_.*",
        getFinalizedEditsFileName(1, 2), getInProgressEditsFileName(3));
    doSaveNamespace(nn);
    LOG.info("After second save, image 0 should be purged, "
        + "and image 4 should exist in both.");
    assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
    assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
    assertGlobEquals(cd0, "edits_.*",
        getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
    assertGlobEquals(cd1, "edits_.*",
        getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
    // Simulate a storage failure on the first dir during save.
    LOG.info("Failing first storage dir by chmodding it");
    assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "000"));
    doSaveNamespace(nn);
    LOG.info("Restoring accessibility of first storage dir");
    assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "755"));
    // Failed dir: untouched. Healthy dir: purged normally.
    LOG.info("nothing should have been purged in first storage dir");
    assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
    assertGlobEquals(cd0, "edits_.*",
        getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
    LOG.info("fsimage_2 should be purged in second storage dir");
    assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(4), getImageFileName(6));
    assertGlobEquals(cd1, "edits_.*",
        getFinalizedEditsFileName(5, 6), getInProgressEditsFileName(7));
    LOG.info("On next save, we should purge logs from the failed dir,"
        + " but not images, since the image directory is in failed state.");
    doSaveNamespace(nn);
    assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(6), getImageFileName(8));
    assertGlobEquals(cd1, "edits_.*",
        getFinalizedEditsFileName(7, 8), getInProgressEditsFileName(9));
    assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
    assertGlobEquals(cd0, "edits_.*", getInProgressEditsFileName(9));
  } finally {
    // Always restore permissions so the test tree can be cleaned up.
    FileUtil.chmod(cd0.getAbsolutePath(), "755");
    LOG.info("Shutting down...");
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameCache

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises NameCache promotion: names put at least twice before
 * initialized() are cached and reused; names seen once are not; a
 * reset() discards everything previously cached.
 */
@Test
public void testDictionary() throws Exception {
  NameCache cache = new NameCache(2);
  String[] repeated = {"part1", "part10000000", "fileabc", "abc", "filepart"};
  String[] unique = {"spart1", "apart", "abcd", "def"};
  // Insert each repeated name twice; the second put must hand back the
  // identical instance (reference equality is the point of the cache).
  for (String name : repeated) {
    cache.put(name);
    assertTrue(name == cache.put(name));
  }
  // Names inserted only once should not be promoted into the cache.
  for (String name : unique) {
    cache.put(name);
  }
  cache.initialized();
  for (String name : repeated) {
    verifyNameReuse(cache, name, true);
  }
  assertEquals(repeated.length, cache.size());
  for (String name : unique) {
    verifyNameReuse(cache, name, false);
  }
  // After a reset nothing is reused, not even previously cached names.
  cache.reset();
  cache.initialized();
  for (String name : repeated) {
    verifyNameReuse(cache, name, false);
  }
  for (String name : unique) {
    verifyNameReuse(cache, name, false);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeMXBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@SuppressWarnings({"unchecked"}) @Test public void testNameNodeMXBeanInfo() throws Exception { Configuration conf=new Configuration(); conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,NativeIO.POSIX.getCacheManipulator().getMemlockLimit()); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,1); MiniDFSCluster cluster=null; try { cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); FSNamesystem fsn=cluster.getNameNode().namesystem; MBeanServer mbs=ManagementFactory.getPlatformMBeanServer(); ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"); FileSystem localFileSys=FileSystem.getLocal(conf); Path workingDir=localFileSys.getWorkingDirectory(); Path dir=new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean"); Path includeFile=new Path(dir,"include"); assertTrue(localFileSys.mkdirs(dir)); StringBuilder includeHosts=new StringBuilder(); for ( DataNode dn : cluster.getDataNodes()) { includeHosts.append(dn.getDisplayName()).append("\n"); } DFSTestUtil.writeFile(localFileSys,includeFile,includeHosts.toString()); conf.set(DFSConfigKeys.DFS_HOSTS,includeFile.toUri().getPath()); fsn.getBlockManager().getDatanodeManager().refreshNodes(conf); cluster.stopDataNode(0); while (fsn.getNumDatanodesInService() != 2) { try { Thread.sleep(1000); } catch ( InterruptedException e) { } } String clusterId=(String)mbs.getAttribute(mxbeanName,"ClusterId"); assertEquals(fsn.getClusterId(),clusterId); String blockpoolId=(String)mbs.getAttribute(mxbeanName,"BlockPoolId"); assertEquals(fsn.getBlockPoolId(),blockpoolId); String version=(String)mbs.getAttribute(mxbeanName,"Version"); assertEquals(fsn.getVersion(),version); assertTrue(version.equals(VersionInfo.getVersion() + ", r" + VersionInfo.getRevision())); Long used=(Long)mbs.getAttribute(mxbeanName,"Used"); assertEquals(fsn.getUsed(),used.longValue()); Long 
total=(Long)mbs.getAttribute(mxbeanName,"Total"); assertEquals(fsn.getTotal(),total.longValue()); String safemode=(String)mbs.getAttribute(mxbeanName,"Safemode"); assertEquals(fsn.getSafemode(),safemode); Long nondfs=(Long)(mbs.getAttribute(mxbeanName,"NonDfsUsedSpace")); assertEquals(fsn.getNonDfsUsedSpace(),nondfs.longValue()); Float percentremaining=(Float)(mbs.getAttribute(mxbeanName,"PercentRemaining")); assertEquals(fsn.getPercentRemaining(),percentremaining.floatValue(),DELTA); Long totalblocks=(Long)(mbs.getAttribute(mxbeanName,"TotalBlocks")); assertEquals(fsn.getTotalBlocks(),totalblocks.longValue()); String alivenodeinfo=(String)(mbs.getAttribute(mxbeanName,"LiveNodes")); Map> liveNodes=(Map>)JSON.parse(alivenodeinfo); assertTrue(liveNodes.size() > 0); for ( Map liveNode : liveNodes.values()) { assertTrue(liveNode.containsKey("nonDfsUsedSpace")); assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0); assertTrue(liveNode.containsKey("capacity")); assertTrue(((Long)liveNode.get("capacity")) > 0); assertTrue(liveNode.containsKey("numBlocks")); assertTrue(((Long)liveNode.get("numBlocks")) == 0); } assertEquals(fsn.getLiveNodes(),alivenodeinfo); String deadnodeinfo=(String)(mbs.getAttribute(mxbeanName,"DeadNodes")); assertEquals(fsn.getDeadNodes(),deadnodeinfo); Map> deadNodes=(Map>)JSON.parse(deadnodeinfo); assertTrue(deadNodes.size() > 0); for ( Map deadNode : deadNodes.values()) { assertTrue(deadNode.containsKey("lastContact")); assertTrue(deadNode.containsKey("decommissioned")); assertTrue(deadNode.containsKey("xferaddr")); } String nodeUsage=(String)(mbs.getAttribute(mxbeanName,"NodeUsage")); assertEquals("Bad value for NodeUsage",fsn.getNodeUsage(),nodeUsage); String nameJournalStatus=(String)(mbs.getAttribute(mxbeanName,"NameJournalStatus")); assertEquals("Bad value for NameJournalStatus",fsn.getNameJournalStatus(),nameJournalStatus); String journalTxnInfo=(String)mbs.getAttribute(mxbeanName,"JournalTransactionInfo"); assertEquals("Bad value for 
NameTxnIds",fsn.getJournalTransactionInfo(),journalTxnInfo); String nnStarted=(String)mbs.getAttribute(mxbeanName,"NNStarted"); assertEquals("Bad value for NNStarted",fsn.getNNStarted(),nnStarted); String compileInfo=(String)mbs.getAttribute(mxbeanName,"CompileInfo"); assertEquals("Bad value for CompileInfo",fsn.getCompileInfo(),compileInfo); String corruptFiles=(String)(mbs.getAttribute(mxbeanName,"CorruptFiles")); assertEquals("Bad value for CorruptFiles",fsn.getCorruptFiles(),corruptFiles); String nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses")); assertEquals(fsn.getNameDirStatuses(),nameDirStatuses); Map> statusMap=(Map>)JSON.parse(nameDirStatuses); Collection nameDirUris=cluster.getNameDirs(0); for ( URI nameDirUri : nameDirUris) { File nameDir=new File(nameDirUri); System.out.println("Checking for the presence of " + nameDir + " in active name dirs."); assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath())); } assertEquals(2,statusMap.get("active").size()); assertEquals(0,statusMap.get("failed").size()); File failedNameDir=new File(nameDirUris.iterator().next()); assertEquals(0,FileUtil.chmod(new File(failedNameDir,"current").getAbsolutePath(),"000")); cluster.getNameNodeRpc().rollEditLog(); nameDirStatuses=(String)(mbs.getAttribute(mxbeanName,"NameDirStatuses")); statusMap=(Map>)JSON.parse(nameDirStatuses); for ( URI nameDirUri : nameDirUris) { File nameDir=new File(nameDirUri); String expectedStatus=nameDir.equals(failedNameDir) ? 
"failed" : "active"; System.out.println("Checking for the presence of " + nameDir + " in "+ expectedStatus+ " name dirs."); assertTrue(statusMap.get(expectedStatus).containsKey(nameDir.getAbsolutePath())); } assertEquals(1,statusMap.get("active").size()); assertEquals(1,statusMap.get("failed").size()); assertEquals(0L,mbs.getAttribute(mxbeanName,"CacheUsed")); assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * cluster.getDataNodes().size(),mbs.getAttribute(mxbeanName,"CacheCapacity")); } finally { if (cluster != null) { for ( URI dir : cluster.getNameDirs(0)) { FileUtil.chmod(new File(new File(dir),"current").getAbsolutePath(),"755"); } cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeOptionParsing

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Parses the -upgrade startup option with and without -clusterid and
 * -renameReserved, and verifies that invalid -renameReserved values
 * are rejected.
 */
@Test(timeout = 10000)
public void testUpgrade() {
  StartupOption opt = null;
  // Plain -upgrade: no cluster id, no reserved-path renames.
  opt = NameNode.parseArguments(new String[]{"-upgrade"});
  // Expected value first (the original had the arguments swapped here).
  assertEquals(StartupOption.UPGRADE, opt);
  assertNull(opt.getClusterId());
  assertTrue(FSImageFormat.renameReservedMap.isEmpty());
  // -clusterid is captured on the option.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-clusterid", "mycid"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals("mycid", opt.getClusterId());
  assertTrue(FSImageFormat.renameReservedMap.isEmpty());
  // Explicit renameReserved key=value pairs populate the rename map.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-clusterid", "mycid",
      "-renameReserved", ".snapshot=.my-snapshot,.reserved=.my-reserved"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals("mycid", opt.getClusterId());
  assertEquals(".my-snapshot", FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".my-reserved", FSImageFormat.renameReservedMap.get(".reserved"));
  FSImageFormat.renameReservedMap.clear();
  // Argument order should not matter.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
      ".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid", "mycid"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals("mycid", opt.getClusterId());
  assertEquals(".my-snapshot", FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".my-reserved", FSImageFormat.renameReservedMap.get(".reserved"));
  // Bare -renameReserved uses layout-version-derived default names.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved"});
  assertEquals(StartupOption.UPGRADE, opt);
  assertEquals(".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION
      + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap.get(".snapshot"));
  assertEquals(".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION
      + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap.get(".reserved"));
  // BUGFIX: each expected-exception block now calls fail() so that a
  // regression that stops throwing no longer passes silently.
  try {
    opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
        ".reserved=.my-reserved,.not-reserved=.my-not-reserved"});
    fail("Expected an IllegalArgumentException for an unknown reserved path");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("Unknown reserved path", e);
  }
  try {
    opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
        ".reserved=.my-reserved,.snapshot=.snapshot"});
    fail("Expected an IllegalArgumentException for an invalid rename path");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("Invalid rename path", e);
  }
  try {
    opt = NameNode.parseArguments(new String[]{"-upgrade", "-renameReserved",
        ".snapshot=.reserved"});
    fail("Expected an IllegalArgumentException for an invalid rename path");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("Invalid rename path", e);
  }
  // An unknown flag makes the whole parse fail.
  opt = NameNode.parseArguments(new String[]{"-upgrade", "-cid"});
  assertNull(opt);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeResourceChecker

InternalCallVerifier EqualityVerifier 
/**
 * Two name dirs that live on the same volume must result in a single
 * space check rather than one check per directory.
 */
@Test
public void testChecking2NameDirsOnOneVolume() throws IOException {
  Configuration conf = new Configuration();
  File firstDir = new File(BASE_DIR, "name-dir1");
  File secondDir = new File(BASE_DIR, "name-dir2");
  firstDir.mkdirs();
  secondDir.mkdirs();
  String editsDirs = firstDir.getAbsolutePath() + "," + secondDir.getAbsolutePath();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
  // Reserve the maximum so every checked volume reports low on space.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
  NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
  assertEquals("Should not check the same volume more than once.",
      1, checker.getVolumesLowOnSpace().size());
}

InternalCallVerifier EqualityVerifier 
/**
 * A manually configured checked volume that coincides with the volume
 * holding the name dir must be checked only once.
 */
@Test
public void testCheckingExtraVolumes() throws IOException {
  Configuration conf = new Configuration();
  File nameDir = new File(BASE_DIR, "name-dir");
  nameDir.mkdirs();
  String volumePath = nameDir.getAbsolutePath();
  // The same path serves both as edits dir and as an extra checked volume.
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, volumePath);
  conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, volumePath);
  // Reserve the maximum so the checked volume reports low on space.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
  NameNodeResourceChecker checker = new NameNodeResourceChecker(conf);
  assertEquals("Should not check the same volume more than once.",
      1, checker.getVolumesLowOnSpace().size());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeRpcServer

InternalCallVerifier EqualityVerifier 
/**
 * Setting the RPC bind-host key to the wildcard address must make the
 * client RPC server listen on 0.0.0.0.
 */
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
    String boundHost = rpcServer.getClientRpcServer().getListenerAddress().getHostName();
    assertEquals("0.0.0.0", boundHost);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    // Clear the key so subsequent tests are not affected.
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNameNodeXAttr

APIUtilityVerifier EqualityVerifier 
/**
 * setXAttr/getXAttrs/removeXAttr through a symlink must operate on the
 * link target.
 */
@Test(timeout = 120000)
public void testXAttrSymlinks() throws Exception {
  fs.mkdirs(linkParent);
  fs.mkdirs(targetParent);
  DFSTestUtil.createFile(fs, target, 1024, (short) 3, 0xBEEFL);
  fs.createSymlink(target, link, false);
  fs.setXAttr(target, name1, value1);
  fs.setXAttr(target, name2, value2);
  // Reading through the link must see the target's xattrs.
  // (Generic parameters restored on the raw Map; expected-first
  // argument order fixed on the size assertions.)
  Map<String, byte[]> xattrs = fs.getXAttrs(link);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  // Setting through the link (null value => empty) must hit the target.
  fs.setXAttr(link, name3, null);
  xattrs = fs.getXAttrs(target);
  Assert.assertEquals(3, xattrs.size());
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // Removal through the link also affects the target.
  fs.removeXAttr(link, name1);
  xattrs = fs.getXAttrs(target);
  Assert.assertEquals(2, xattrs.size());
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
  // Removal on the target is visible through the link.
  fs.removeXAttr(target, name3);
  xattrs = fs.getXAttrs(link);
  Assert.assertEquals(1, xattrs.size());
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  fs.delete(linkParent, true);
  fs.delete(targetParent, true);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNamenodeCapacityReport

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that total load (xceiver count) and the in-service xceiver
 * average correctly track datanode shutdowns, restarts, decommissioning
 * and open-for-write pipelines.
 */
@Test
public void testXceiverCount() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Retries would mask pipeline failures against decommissioned nodes.
  conf.setInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 0);
  MiniDFSCluster cluster = null;
  final int nodes = 8;
  final int fileCount = 5;
  final short fileRepl = 3;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
    // Generic parameter restored on the raw List.
    List<DataNode> datanodes = cluster.getDataNodes();
    final DistributedFileSystem fs = cluster.getFileSystem();
    triggerHeartbeats(datanodes);
    // Every idle datanode contributes one xceiver to the load.
    int expectedTotalLoad = nodes;
    int expectedInServiceNodes = nodes;
    int expectedInServiceLoad = nodes;
    assertEquals(nodes, namesystem.getNumLiveDataNodes());
    assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
    assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
    assertEquals((double) expectedInServiceLoad / expectedInServiceLoad,
        namesystem.getInServiceXceiverAverage(), EPSILON);
    // Shut down half of the nodes and check the counters drop.
    for (int i = 0; i < nodes / 2; i++) {
      DataNode dn = datanodes.get(i);
      DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
      dn.shutdown();
      dnd.setLastUpdate(0L);
      BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
      expectedInServiceNodes--;
      assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
    }
    // Restart everything and re-verify the baseline counters.
    cluster.restartDataNodes();
    cluster.waitActive();
    datanodes = cluster.getDataNodes();
    expectedInServiceNodes = nodes;
    assertEquals(nodes, datanodes.size());
    assertEquals(nodes, namesystem.getNumLiveDataNodes());
    assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
    assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
    assertEquals((double) expectedInServiceLoad / expectedInServiceLoad,
        namesystem.getInServiceXceiverAverage(), EPSILON);
    // Open files to create write pipelines; each replica in a pipeline
    // adds a reader and a writer xceiver (2 * replication per stream).
    DFSOutputStream[] streams = new DFSOutputStream[fileCount];
    for (int i = 0; i < fileCount; i++) {
      streams[i] = (DFSOutputStream) fs.create(new Path("/f" + i), fileRepl)
          .getWrappedStream();
      streams[i].write("1".getBytes());
      streams[i].hsync();
      expectedTotalLoad += 2 * fileRepl;
      expectedInServiceLoad += 2 * fileRepl;
    }
    triggerHeartbeats(datanodes);
    assertEquals(nodes, namesystem.getNumLiveDataNodes());
    assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
    assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
    assertEquals((double) expectedInServiceLoad / expectedInServiceNodes,
        namesystem.getInServiceXceiverAverage(), EPSILON);
    // Decommissioning keeps nodes in the total load but removes them
    // from the in-service average.
    for (int i = 0; i < fileRepl; i++) {
      expectedInServiceNodes--;
      DatanodeDescriptor dnd = dnm.getDatanode(datanodes.get(i).getDatanodeId());
      expectedInServiceLoad -= dnd.getXceiverCount();
      dnm.startDecommission(dnd);
      DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
      Thread.sleep(100);
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double) expectedInServiceLoad / expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);
    }
    // Closing streams tears down pipelines; a close may legitimately
    // fail only when the whole pipeline was decommissioned.
    for (int i = 0; i < fileCount; i++) {
      int decomm = 0;
      for (DatanodeInfo dni : streams[i].getPipeline()) {
        DatanodeDescriptor dnd = dnm.getDatanode(dni);
        expectedTotalLoad -= 2;
        if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
          decomm++;
        } else {
          expectedInServiceLoad -= 2;
        }
      }
      try {
        streams[i].close();
      } catch (IOException ioe) {
        if (decomm < fileRepl) {
          throw ioe;
        }
      }
      triggerHeartbeats(datanodes);
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double) expectedInServiceLoad / expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);
    }
    // Shut everything down; the average must settle at zero.
    for (int i = 0; i < nodes; i++) {
      DataNode dn = datanodes.get(i);
      dn.shutdown();
      DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
      dnDesc.setLastUpdate(0L);
      BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
      assertEquals(nodes - 1 - i, namesystem.getNumLiveDataNodes());
      if (i >= fileRepl) {
        expectedInServiceNodes--;
      }
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      double expectedXceiverAvg = (i == nodes - 1) ? 0.0 : 1.0;
      assertEquals((double) expectedXceiverAvg,
          namesystem.getInServiceXceiverAverage(), EPSILON);
    }
    assertEquals(0, namesystem.getNumLiveDataNodes());
    assertEquals(0, namesystem.getNumDatanodesInService());
    assertEquals(0.0, namesystem.getTotalLoad(), EPSILON);
    assertEquals(0.0, namesystem.getInServiceXceiverAverage(), EPSILON);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestNamenodeRetryCache

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test for createSnapshot/renameSnapshot/deleteSnapshot retry-cache
 * behavior: within one RPC call identity, repeated invocations return
 * the cached result; under a new call identity the operation really
 * re-executes and must fail.
 */
@Test
public void testSnapshotMethods() throws Exception {
  String dir = "/testNamenodeRetryCache/testCreateSnapshot/src";
  resetCall();
  namesystem.mkdirs(dir, perm, true);
  namesystem.allowSnapshot(dir);
  // Same call id: retries of createSnapshot return the cached name.
  newCall();
  String name = namesystem.createSnapshot(dir, "snap1");
  Assert.assertEquals(name, namesystem.createSnapshot(dir, "snap1"));
  Assert.assertEquals(name, namesystem.createSnapshot(dir, "snap1"));
  Assert.assertEquals(name, namesystem.createSnapshot(dir, "snap1"));
  // New call id: the snapshot already exists, so this must fail.
  newCall();
  try {
    namesystem.createSnapshot(dir, "snap1");
    Assert.fail("testSnapshotMethods expected exception is not thrown");
  } catch (IOException e) {
    // expected: snapshot already exists
  }
  // Retried renames under one call id succeed via the cache.
  newCall();
  namesystem.renameSnapshot(dir, "snap1", "snap2");
  namesystem.renameSnapshot(dir, "snap1", "snap2");
  namesystem.renameSnapshot(dir, "snap1", "snap2");
  // New call id: "snap1" no longer exists, so the rename must fail.
  newCall();
  try {
    namesystem.renameSnapshot(dir, "snap1", "snap2");
    Assert.fail("testSnapshotMethods expected exception is not thrown");
  } catch (IOException e) {
    // expected: source snapshot is gone
  }
  // Retried deletes under one call id succeed via the cache.
  newCall();
  namesystem.deleteSnapshot(dir, "snap2");
  namesystem.deleteSnapshot(dir, "snap2");
  namesystem.deleteSnapshot(dir, "snap2");
  // New call id: the snapshot is already deleted, so this must fail.
  newCall();
  try {
    namesystem.deleteSnapshot(dir, "snap2");
    Assert.fail("testSnapshotMethods expected exception is not thrown");
  } catch (IOException e) {
    // expected: snapshot already deleted
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After running a set of operations, restart the NN and check that the
 * retry cache has been rebuilt from the edit log.
 */
@Test
public void testRetryCacheRebuild() throws Exception {
  DFSTestUtil.runOperations(cluster, filesystem, conf, BlockSize, 0);
  // Generic parameters restored on the raw cache/map/iterator types.
  LightWeightCache<CacheEntry, CacheEntry> cacheSet =
      (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
  assertEquals(23, cacheSet.size());
  // Remember every entry so the rebuilt cache can be compared.
  Map<CacheEntry, CacheEntry> oldEntries = new HashMap<CacheEntry, CacheEntry>();
  Iterator<CacheEntry> iter = cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry = iter.next();
    oldEntries.put(entry, entry);
  }
  // Restart; the retry cache must be reconstructed from the edit log.
  cluster.restartNameNode();
  cluster.waitActive();
  namesystem = cluster.getNamesystem();
  assertTrue(namesystem.hasRetryCache());
  cacheSet =
      (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
  assertEquals(23, cacheSet.size());
  iter = cacheSet.iterator();
  while (iter.hasNext()) {
    CacheEntry entry = iter.next();
    assertTrue(oldEntries.containsKey(entry));
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test for create file: within one RPC call identity, retried
 * startFile calls return the cached HdfsFileStatus; under a new call
 * identity the create re-executes and fails because the file exists.
 */
@Test
public void testCreate() throws Exception {
  String src = "/testNamenodeRetryCache/testCreate/file";
  // Same call id: the original create and its retries all succeed with
  // the same status object from the retry cache.
  newCall();
  HdfsFileStatus status = namesystem.startFile(src, perm, "holder",
      "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
  Assert.assertEquals(status, namesystem.startFile(src, perm, "holder",
      "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null));
  Assert.assertEquals(status, namesystem.startFile(src, perm, "holder",
      "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null));
  // New call id: creating without OVERWRITE must now fail.
  newCall();
  try {
    namesystem.startFile(src, perm, "holder", "clientmachine",
        EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
    Assert.fail("testCreate - expected exception is not thrown");
  } catch (IOException e) {
    // expected: file already exists
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test for append: retried appendFile calls under one RPC call
 * identity return the cached LocatedBlock; under a new call identity
 * the append re-executes and fails since the file is already open.
 * (The original comment incorrectly said "Test for rename1".)
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short) 1, 0L);
  // Same call id: the append and its retries return the cached block.
  newCall();
  LocatedBlock b = namesystem.appendFile(src, "holder", "clientMachine");
  Assert.assertEquals(b, namesystem.appendFile(src, "holder", "clientMachine"));
  Assert.assertEquals(b, namesystem.appendFile(src, "holder", "clientMachine"));
  // New call id: a second real append must fail.
  newCall();
  try {
    namesystem.appendFile(src, "holder", "clientMachine");
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // expected failure for a brand-new call id
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestParallelImageWrite

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * check if DFS remains in proper condition after a restart
 */
@Test
public void testRestartDFS() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FSNamesystem fsn = null;
  int numNamenodeDirs;
  DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS")
      .setNumFiles(200).build();
  final String dir = "/srcdat";
  final Path rootpath = new Path("/");
  final Path dirpath = new Path(dir);
  long rootmtime;
  FileStatus rootstatus;
  FileStatus dirstatus;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(true)
        .numDataNodes(NUM_DATANODES).build();
    String[] nameNodeDirs = conf.getStrings(
        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[]{});
    numNamenodeDirs = nameNodeDirs.length;
    assertTrue("failed to get number of Namenode StorageDirs", numNamenodeDirs != 0);
    FileSystem fs = cluster.getFileSystem();
    files.createFiles(fs, dir);
    rootmtime = fs.getFileStatus(rootpath).getModificationTime();
    // NOTE(review): rootstatus is captured from dirpath, not rootpath;
    // owner/group coincide for both here, but confirm this is intentional.
    rootstatus = fs.getFileStatus(dirpath);
    dirstatus = fs.getFileStatus(dirpath);
    // Mutate owner/group so the restart must preserve the changes.
    fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
    fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  try {
    // Force a checkpoint quickly after restart.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .numDataNodes(NUM_DATANODES).build();
    fsn = cluster.getNamesystem();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Filesystem corrupted after restart.", files.checkFiles(fs, dir));
    final FileStatus newrootstatus = fs.getFileStatus(rootpath);
    assertEquals(rootmtime, newrootstatus.getModificationTime());
    assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
    assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
    final FileStatus newdirstatus = fs.getFileStatus(dirpath);
    assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
    assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
    rootmtime = fs.getFileStatus(rootpath).getModificationTime();
    // All name dirs must have written identical image contents.
    final String checkAfterRestart = checkImages(fsn, numNamenodeDirs);
    // Change the namespace and save again; the image digest must change.
    files.cleanup(fs, dir);
    files.createFiles(fs, dir);
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
    assertFalse("Modified namespace should change fsimage contents. "
        + "was: " + checkAfterRestart + " now: " + checkAfterModify,
        checkAfterRestart.equals(checkAfterModify));
    fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    files.cleanup(fs, dir);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestProcessCorruptBlocks

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. Here that condition is
 * reached by DEcreasing the replication factor.
 *
 * Strategy:
 *   - Bring up a cluster with 3 DataNodes.
 *   - Create a file with replication factor 3.
 *   - Corrupt one replica of a block of the file.
 *   - Verify 2 good replicas + 1 corrupt replica remain (the corrupt one
 *     is NOT removed: good replicas (2) &lt; replication factor (3)).
 *   - Set the replication factor to 2.
 *   - Verify the corrupt replica is removed (good replicas (2) == new
 *     replication factor (2)).
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Frequent block reports so the NN sees the corruption quickly.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // Give the NN time to process the replication change and purge the
    // corrupt replica.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
      // FIX: restore the interrupt status instead of swallowing it.
      Thread.currentThread().interrupt();
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * None of the replicas can be removed if ALL replicas are corrupt, since
 * removal requires enough good replicas to satisfy the replication factor.
 *
 * Strategy:
 *   - Bring up a cluster with 3 DataNodes.
 *   - Create a file with replication factor 3.
 *   - Corrupt all three replicas.
 *   - Verify 0 live / 3 corrupt replicas remain.
 *   - Set the replication factor to 1.
 *   - Verify 0 live / 3 corrupt replicas STILL remain (no good replica
 *     exists, so nothing may be purged).
 */
@Test
public void testWithAllCorruptReplicas() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Frequent block reports so the NN sees the corruption quickly.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);
    corruptBlock(cluster, fs, fileName, 1, block);
    corruptBlock(cluster, fs, fileName, 2, block);

    // Wait for the block reports to reach the NN.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
      // FIX: restore the interrupt status instead of swallowing it.
      Thread.currentThread().interrupt();
    }

    assertEquals(0, countReplicas(namesystem, block).liveReplicas());
    assertEquals(3, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 1);

    // Wait again; the corrupt replicas must NOT be purged.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
      // FIX: restore the interrupt status instead of swallowing it.
      Thread.currentThread().interrupt();
    }

    assertEquals(0, countReplicas(namesystem, block).liveReplicas());
    assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The corrupt replica must be removed once the number of good replicas
 * reaches the file's replication factor. Here that condition is reached
 * by INcreasing the number of good replicas: a spare DataNode is brought
 * online so a fresh replica can be created.
 *
 * Strategy:
 *   - Bring up a cluster with 4 DataNodes, then stop the 4th.
 *   - Create a file with replication factor 3 on the remaining 3 nodes.
 *   - Corrupt one replica; verify 2 live + 1 corrupt replica remain.
 *   - Restart the 4th DataNode; verify a new replica is created and the
 *     corrupt one is purged (3 live / 0 corrupt).
 */
@Test
public void testByAddingAnExtraDataNode() throws Exception {
  Configuration config = new HdfsConfiguration();
  config.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  config.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster dfsCluster =
      new MiniDFSCluster.Builder(config).numDataNodes(4).build();
  FileSystem dfs = dfsCluster.getFileSystem();
  final FSNamesystem ns = dfsCluster.getNamesystem();
  // Park the 4th DataNode; it is revived later to host the new replica.
  DataNodeProperties sparedDataNode = dfsCluster.stopDataNode(3);
  try {
    final Path testFile = new Path("/foo1");
    DFSTestUtil.createFile(dfs, testFile, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(dfs, testFile, (short) 3);

    ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(dfs, testFile);
    corruptBlock(dfsCluster, dfs, testFile, 0, firstBlock);

    DFSTestUtil.waitReplication(dfs, testFile, (short) 2);
    assertEquals(2, countReplicas(ns, firstBlock).liveReplicas());
    assertEquals(1, countReplicas(ns, firstBlock).corruptReplicas());

    // Bring the spare node back; re-replication should kick in and the
    // corrupt replica should be removed.
    dfsCluster.restartDataNode(sparedDataNode);
    DFSTestUtil.waitReplication(dfs, testFile, (short) 3);
    assertEquals(3, countReplicas(ns, firstBlock).liveReplicas());
    assertEquals(0, countReplicas(ns, firstBlock).corruptReplicas());
  } finally {
    dfsCluster.shutdown();
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * The corrupt replica must be removed once good replicas reach the file's
 * replication factor; this must hold as long as there is at least one good
 * replica.
 *
 * Strategy:
 *   - Bring up a cluster with 2 DataNodes.
 *   - Create a file with replication factor 2.
 *   - Corrupt one replica; verify 1 live + 1 corrupt replica remain (the
 *     corrupt one is NOT removed: good replicas (1) &lt; replication
 *     factor (2)).
 *   - Set the replication factor to 1.
 *   - Verify the corrupt replica is purged (good replicas (1) == new
 *     replication factor (1)).
 */
@Test(timeout = 20000)
public void testWithReplicationFactorAsOne() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Frequent block reports so the NN sees the corruption quickly.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 1);

    // Poll (up to ~10s) for the corrupt replica to be purged rather than
    // relying on a single fixed sleep.
    for (int i = 0; i < 10; i++) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
        // FIX: restore the interrupt status instead of swallowing it.
        Thread.currentThread().interrupt();
      }
      if (countReplicas(namesystem, block).corruptReplicas() == 0) {
        break;
      }
    }

    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSaveNamespace

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that the edit-log transaction id advances by the expected
 * amounts across edits, saveNamespace, shutdown, and reload from disk.
 *
 * The exact expected txids (1, 2, 4, 5, 6) encode the number of
 * transactions each operation writes: one for the initial log segment,
 * one per edit, two for saveNamespace (end + begin segment), one on
 * close, and one when a new segment is opened on reload.
 */
@Test(timeout=30000) public void testTxIdPersistence() throws Exception {
  Configuration conf=getConf();
  NameNode.initMetrics(conf,NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn=FSNamesystem.loadFromDisk(conf);
  try {
    // Freshly loaded namesystem: the opening segment is txid 1.
    assertEquals(1,fsn.getEditLog().getLastWrittenTxId());
    // One edit -> one more transaction.
    doAnEdit(fsn,1);
    assertEquals(2,fsn.getEditLog().getLastWrittenTxId());
    // saveNamespace writes two transactions (segment end + new begin).
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();
    assertEquals(4,fsn.getEditLog().getLastWrittenTxId());
    // Closing writes one more transaction.
    fsn.getFSImage().close();
    fsn.close();
    assertEquals(5,fsn.getEditLog().getLastWrittenTxId());
    fsn=null;
    // Reloading from disk opens a new segment: one more transaction.
    fsn=FSNamesystem.loadFromDisk(conf);
    assertEquals(6,fsn.getEditLog().getLastWrittenTxId());
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSecondaryWebUi

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that the SecondaryNameNode's JMX MBean
 * (SecondaryNameNodeInfo) reports the same checkpoint and
 * checkpoint-editlog directories as the SecondaryNameNode instance
 * itself.
 */
@Test
public void testSecondaryWebUi()
    throws IOException, MalformedObjectNameException,
    AttributeNotFoundException, MBeanException, ReflectionException,
    InstanceNotFoundException {
  final MBeanServer beanServer = ManagementFactory.getPlatformMBeanServer();
  final ObjectName infoBeanName = new ObjectName(
      "Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");

  // Checkpoint image directories must agree between JMX and the SNN.
  final String[] reportedCheckpointDirs = (String[]) beanServer
      .getAttribute(infoBeanName, "CheckpointDirectories");
  Assert.assertArrayEquals(reportedCheckpointDirs,
      snn.getCheckpointDirectories());

  // Checkpoint edit-log directories must agree as well.
  final String[] reportedEditlogDirs = (String[]) beanServer
      .getAttribute(infoBeanName, "CheckpointEditlogDirectories");
  Assert.assertArrayEquals(reportedEditlogDirs,
      snn.getCheckpointEditlogDirectories());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSecureNameNode

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies Kerberos-authenticated access control on a secure NameNode:
 * a keytab-logged-in user (user1) must be denied writes to "/" but
 * allowed to create and list directories under the world-writable /tmp,
 * and the UGI must report KERBEROS as its authentication method.
 */
@Test
public void testName() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    String keyTabDir = System.getProperty("kdc.resource.dir") + "/keytabs";
    String nn1KeytabPath = keyTabDir + "/nn1.keytab";
    String user1KeyTabPath = keyTabDir + "/user1.keytab";
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        "nn1/localhost@EXAMPLE.COM");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nn1KeytabPath);

    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();

    // As the cluster's own user, make /tmp world-writable (0777).
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    fsForCurrentUser.setPermission(new Path("/tmp"),
        new FsPermission((short) 511));

    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",
            user1KeyTabPath);
    // FIX: parameterize the action (was a raw PrivilegedExceptionAction,
    // which loses the FileSystem return type and raises raw-type
    // warnings / unchecked conversion).
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });

    // user1 has no write permission on "/": mkdir must fail.
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("user1 must not be allowed to write in /");
    } catch (IOException expected) {
      // expected: permission denied for non-superuser on "/"
    }

    // /tmp is world-writable, so this must succeed.
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS,
        ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSecureNameNodeWithExternalKdc

APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Same access-control check as TestSecureNameNode#testName, but against
 * an EXTERNAL KDC: principals and keytabs are supplied via system
 * properties rather than the bundled test KDC. The keytab-logged-in user
 * must be denied writes to "/", allowed to write under the
 * world-writable /tmp, and authenticated via KERBEROS.
 */
@Test
public void testSecureNameNode() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    // All principals/keytabs come from system properties set by the
    // test harness; fail fast if any is missing.
    String nnPrincipal =
        System.getProperty("dfs.namenode.kerberos.principal");
    String nnSpnegoPrincipal = System.getProperty(
        "dfs.namenode.kerberos.internal.spnego.principal");
    String nnKeyTab = System.getProperty("dfs.namenode.keytab.file");
    assertNotNull("NameNode principal was not specified", nnPrincipal);
    assertNotNull("NameNode SPNEGO principal was not specified",
        nnSpnegoPrincipal);
    assertNotNull("NameNode keytab was not specified", nnKeyTab);

    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
        "kerberos");
    conf.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
        nnPrincipal);
    conf.set(
        DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
        nnSpnegoPrincipal);
    conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, nnKeyTab);

    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_OF_DATANODES).build();
    final MiniDFSCluster clusterRef = cluster;
    cluster.waitActive();

    // As the cluster's own user, make /tmp world-writable (0777).
    FileSystem fsForCurrentUser = cluster.getFileSystem();
    fsForCurrentUser.mkdirs(new Path("/tmp"));
    fsForCurrentUser.setPermission(new Path("/tmp"),
        new FsPermission((short) 511));

    String userPrincipal = System.getProperty("user.principal");
    String userKeyTab = System.getProperty("user.keytab");
    assertNotNull("User principal was not specified", userPrincipal);
    assertNotNull("User keytab was not specified", userKeyTab);

    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI(userPrincipal, userKeyTab);
    // FIX: parameterize the action (was a raw PrivilegedExceptionAction,
    // which loses the FileSystem return type and raises raw-type
    // warnings / unchecked conversion).
    FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return clusterRef.getFileSystem();
      }
    });

    // The user has no write permission on "/": mkdir must fail.
    try {
      Path p = new Path("/users");
      fs.mkdirs(p);
      fail("User must not be allowed to write in /");
    } catch (IOException expected) {
      // expected: permission denied for non-superuser on "/"
    }

    // /tmp is world-writable, so this must succeed.
    Path p = new Path("/tmp/alpha");
    fs.mkdirs(p);
    assertNotNull(fs.listStatus(p));
    assertEquals(AuthenticationMethod.KERBEROS,
        ugi.getAuthenticationMethod());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSecurityTokenEditLog

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests transaction logging in dfs: runs NUM_THREADS concurrent
 * transaction threads against the namesystem, then replays each storage
 * directory's finalized edits file and checks the replayed transaction
 * count matches the expected total.
 */
@Test
public void testEditLog() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  try {
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    // FIX: the original used a raw Iterator, so it.next() was Object and
    // .getPath() could not compile; iterate the URI collection directly.
    for (java.net.URI nameDirUri : cluster.getNameDirs(0)) {
      System.out.println(new File(nameDirUri.getPath()));
    }

    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();
    // Small buffer forces frequent flushes while threads race.
    editLog.setOutputBufferCapacity(2048);

    // Launch the transaction worker threads.
    Thread threadId[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
      Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
      threadId[i] = new Thread(trans, "TransactionThread-" + i);
      threadId[i].start();
    }
    // Join all workers; retry a join that gets interrupted.
    for (int i = 0; i < NUM_THREADS; i++) {
      try {
        threadId[i].join();
      } catch (InterruptedException e) {
        i--; // retry joining the same thread
      }
    }
    editLog.close();

    namesystem.getDelegationTokenSecretManager().stopThreads();
    int numKeys =
        namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
    // +2: the segment-begin and segment-end transactions.
    int expectedTransactions =
        NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + 2;

    // Replay every edits file and verify the transaction count.
    for (StorageDirectory sd :
        fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File editFile = NNStorage.getFinalizedEditsFile(sd, 1,
          1 + expectedTransactions - 1);
      System.out.println("Verifying file: " + editFile);
      FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
      long numEdits =
          loader.loadFSEdits(new EditLogFileInputStream(editFile), 1);
      assertEquals("Verification for " + editFile, expectedTransactions,
          numEdits);
    }
  } finally {
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestSnapshotPathINodes

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file while modifying the file after the snapshot:
 * the snapshot path must keep the pre-modification mtime while the live
 * path's mtime must have changed.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterModification() throws Exception {
  // Resolve the live file path and record its modification time.
  String[] names=INode.getPathNames(file1.toString());
  byte[][] components=INode.getPathComponents(names);
  INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  INode[] inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,components.length);
  assertEquals(inodes[components.length - 1].getFullPathName(),file1.toString());
  final long modTime=inodes[inodes.length - 1].getModificationTime();
  // Take snapshot s3, then modify the file by appending.
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1,"s3");
  DFSTestUtil.appendFile(hdfs,file1,"the content for appending");
  // Resolve the snapshot path: one fewer inode than components because
  // the ".snapshot" component has no inode of its own.
  String snapshotPath=sub1.toString() + "/.snapshot/s3/file1";
  names=INode.getPathNames(snapshotPath);
  components=INode.getPathComponents(names);
  INodesInPath ssNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  INode[] ssInodes=ssNodesInPath.getINodes();
  assertEquals(ssInodes.length,components.length - 1);
  final Snapshot s3=getSnapshot(ssNodesInPath,"s3");
  assertSnapshot(ssNodesInPath,true,s3,3);
  // The snapshot copy must still report the pre-append mtime.
  INode snapshotFileNode=ssInodes[ssInodes.length - 1];
  assertEquals(snapshotFileNode.getLocalName(),file1.getName());
  assertTrue(snapshotFileNode.asFile().isWithSnapshot());
  assertEquals(modTime,snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
  // Re-resolve the live path: not a snapshot path, and the append must
  // have bumped its modification time past the recorded one.
  names=INode.getPathNames(file1.toString());
  components=INode.getPathComponents(names);
  INodesInPath newNodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  assertSnapshot(newNodesInPath,false,s3,-1);
  INode[] newInodes=newNodesInPath.getINodes();
  assertEquals(newInodes.length,components.length);
  final int last=components.length - 1;
  assertEquals(newInodes[last].getFullPathName(),file1.toString());
  Assert.assertFalse(modTime == newInodes[last].getModificationTime());
  // Clean up so other tests see an un-snapshotted sub1.
  hdfs.deleteSnapshot(sub1,"s3");
  hdfs.disallowSnapshot(sub1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a normal (non-snapshot) file: a full resolution yields one inode
 * per path component, and limited resolutions (last 1, last 2) yield
 * only the trailing inodes.
 */
@Test(timeout=15000)
public void testNonSnapshotPathINodes() throws Exception {
  final String[] pathNames = INode.getPathNames(file1.toString());
  final byte[][] pathComponents = INode.getPathComponents(pathNames);

  // Full resolution: every component maps to an inode, and none of the
  // path is a snapshot path.
  INodesInPath resolved = INodesInPath.resolve(fsdir.rootDir, pathComponents);
  INode[] resolvedINodes = resolved.getINodes();
  assertEquals(resolvedINodes.length, pathComponents.length);
  assertSnapshot(resolved, false, null, -1);
  assertTrue("file1=" + file1 + ", nodesInPath=" + resolved,
      resolvedINodes[pathComponents.length - 1] != null);
  assertEquals(resolvedINodes[pathComponents.length - 1].getFullPathName(),
      file1.toString());
  assertEquals(resolvedINodes[pathComponents.length - 2].getFullPathName(),
      sub1.toString());
  assertEquals(resolvedINodes[pathComponents.length - 3].getFullPathName(),
      dir.toString());

  // Resolution limited to the last 1 inode: only file1 itself.
  resolved = INodesInPath.resolve(fsdir.rootDir, pathComponents, 1, false);
  resolvedINodes = resolved.getINodes();
  assertEquals(resolvedINodes.length, 1);
  assertSnapshot(resolved, false, null, -1);
  assertEquals(resolvedINodes[0].getFullPathName(), file1.toString());

  // Resolution limited to the last 2 inodes: sub1 then file1.
  resolved = INodesInPath.resolve(fsdir.rootDir, pathComponents, 2, false);
  resolvedINodes = resolved.getINodes();
  assertEquals(resolvedINodes.length, 2);
  assertSnapshot(resolved, false, null, -1);
  assertEquals(resolvedINodes[1].getFullPathName(), file1.toString());
  assertEquals(resolvedINodes[0].getFullPathName(), sub1.toString());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file while adding a new file AFTER the snapshot: the
 * file must be absent (null inode) under the snapshot path but fully
 * resolvable on the live path.
 * NOTE: assigns the class field {@code s4} as a side effect.
 */
@Test(timeout=15000) public void testSnapshotPathINodesWithAddedFile() throws Exception {
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1,"s4");
  // file3 is created AFTER snapshot s4, so s4 must not contain it.
  final Path file3=new Path(sub1,"file3");
  DFSTestUtil.createFile(hdfs,file3,1024,REPLICATION,seed);
  {
    // Snapshot path resolution: the trailing inode for file3 is null
    // because file3 did not exist when s4 was taken.
    String snapshotPath=sub1.toString() + "/.snapshot/s4/file3";
    String[] names=INode.getPathNames(snapshotPath);
    byte[][] components=INode.getPathComponents(names);
    INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
    INode[] inodes=nodesInPath.getINodes();
    // One fewer inode than components: ".snapshot" has no inode itself.
    assertEquals(inodes.length,components.length - 1);
    assertEquals(nodesInPath.getNumNonNull(),components.length - 2);
    s4=getSnapshot(nodesInPath,"s4");
    assertSnapshot(nodesInPath,true,s4,3);
    assertNull(inodes[inodes.length - 1]);
  }
  // Live path resolution: file3 is fully present and the path is not a
  // snapshot path.
  String[] names=INode.getPathNames(file3.toString());
  byte[][] components=INode.getPathComponents(names);
  INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  INode[] inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,components.length);
  assertSnapshot(nodesInPath,false,s4,-1);
  assertEquals(inodes[components.length - 1].getFullPathName(),file3.toString());
  assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
  assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
  // Clean up so other tests see an un-snapshotted sub1.
  hdfs.deleteSnapshot(sub1,"s4");
  hdfs.disallowSnapshot(sub1);
}

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file: full and limited resolutions of a
 * ".snapshot/s1/file1" path, resolution of the bare ".snapshot"
 * directory, and rejection of ".snapshot" under a non-snapshottable
 * location.
 */
@Test(timeout=15000) public void testSnapshotPathINodes() throws Exception {
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1,"s1");
  // Full resolution of the snapshot file path: one fewer inode than
  // components because ".snapshot" has no inode of its own.
  String snapshotPath=sub1.toString() + "/.snapshot/s1/file1";
  String[] names=INode.getPathNames(snapshotPath);
  byte[][] components=INode.getPathComponents(names);
  INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  INode[] inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,components.length - 1);
  final Snapshot snapshot=getSnapshot(nodesInPath,"s1");
  assertSnapshot(nodesInPath,true,snapshot,3);
  // The resolved leaf is file1's inode, whose parent carries the
  // snapshot feature.
  INode snapshotFileNode=inodes[inodes.length - 1];
  assertINodeFile(snapshotFileNode,file1);
  assertTrue(snapshotFileNode.getParent().isWithSnapshot());
  // Limited resolution: last 1 inode only.
  nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,1,false);
  inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,1);
  assertSnapshot(nodesInPath,true,snapshot,-1);
  assertINodeFile(nodesInPath.getLastINode(),file1);
  // Limited resolution: last 2 inodes.
  nodesInPath=INodesInPath.resolve(fsdir.rootDir,components,2,false);
  inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,2);
  assertSnapshot(nodesInPath,true,snapshot,0);
  assertINodeFile(nodesInPath.getLastINode(),file1);
  // Resolving the bare ".snapshot" directory yields sub1 as the last
  // inode (a directory, not a file).
  String dotSnapshotPath=sub1.toString() + "/.snapshot";
  names=INode.getPathNames(dotSnapshotPath);
  components=INode.getPathComponents(names);
  nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,components.length - 1);
  assertSnapshot(nodesInPath,true,snapshot,-1);
  final INode last=nodesInPath.getLastINode();
  assertEquals(last.getFullPathName(),sub1.toString());
  assertFalse(last instanceof INodeFile);
  // A ".snapshot" component under a nonexistent/non-snapshottable path
  // must fail with FileNotFoundException at every prefix length.
  String[] invalidPathComponent={"invalidDir","foo",".snapshot","bar"};
  Path invalidPath=new Path(invalidPathComponent[0]);
  for (int i=1; i < invalidPathComponent.length; i++) {
    invalidPath=new Path(invalidPath,invalidPathComponent[i]);
    try {
      hdfs.getFileStatus(invalidPath);
      Assert.fail();
    } catch ( FileNotFoundException fnfe) {
      System.out.println("The exception is expected: " + fnfe);
    }
  }
  // Clean up so other tests see an un-snapshotted sub1.
  hdfs.deleteSnapshot(sub1,"s1");
  hdfs.disallowSnapshot(sub1);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test {@link INodeDirectory#getExistingPathINodes(byte[][],int,boolean)}
 * for a snapshot file AFTER deleting the original file: the file must
 * still resolve under the snapshot path but yield a null trailing inode
 * on the live path.
 */
@Test(timeout=15000) public void testSnapshotPathINodesAfterDeletion() throws Exception {
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1,"s2");
  // Delete the live file; the snapshot copy must survive.
  hdfs.delete(file1,false);
  final Snapshot snapshot;
  {
    // Snapshot path resolution: file1 still exists in s2.
    String snapshotPath=sub1.toString() + "/.snapshot/s2/file1";
    String[] names=INode.getPathNames(snapshotPath);
    byte[][] components=INode.getPathComponents(names);
    INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
    INode[] inodes=nodesInPath.getINodes();
    // One fewer inode than components: ".snapshot" has no inode itself.
    assertEquals(inodes.length,components.length - 1);
    snapshot=getSnapshot(nodesInPath,"s2");
    assertSnapshot(nodesInPath,true,snapshot,3);
    final INode inode=inodes[inodes.length - 1];
    assertEquals(file1.getName(),inode.getLocalName());
    assertTrue(inode.asFile().isWithSnapshot());
  }
  // Live path resolution: the trailing inode is null because file1 has
  // been deleted; the parent directories still resolve.
  String[] names=INode.getPathNames(file1.toString());
  byte[][] components=INode.getPathComponents(names);
  INodesInPath nodesInPath=INodesInPath.resolve(fsdir.rootDir,components);
  INode[] inodes=nodesInPath.getINodes();
  assertEquals(inodes.length,components.length);
  assertEquals(nodesInPath.getNumNonNull(),components.length - 1);
  assertSnapshot(nodesInPath,false,snapshot,-1);
  assertNull(inodes[components.length - 1]);
  assertEquals(inodes[components.length - 2].getFullPathName(),sub1.toString());
  assertEquals(inodes[components.length - 3].getFullPathName(),dir.toString());
  // Clean up so other tests see an un-snapshotted sub1.
  hdfs.deleteSnapshot(sub1,"s2");
  hdfs.disallowSnapshot(sub1);
}

Class: org.apache.hadoop.hdfs.server.namenode.TestStartup

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * secnn-7
 * Secondary NameNode startup: verifies the SNN copies the fsimage and
 * edits into its own, correctly separated checkpoint directories after a
 * checkpoint against a freshly started NameNode.
 * @throws IOException on checkpoint or cluster failure
 */
@Test public void testSNNStartup() throws IOException {
  LOG.info("--starting SecondNN startup test");
  // NN name/edits share one dir; SNN checkpoint image and edits get two
  // SEPARATE dirs so separation can be verified.
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,fileAsURI(new File(hdfsDir,"name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,fileAsURI(new File(hdfsDir,"name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,fileAsURI(new File(hdfsDir,"chkpt_edits")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,fileAsURI(new File(hdfsDir,"chkpt")).toString());
  LOG.info("--starting NN ");
  MiniDFSCluster cluster=null;
  SecondaryNameNode sn=null;
  NameNode nn=null;
  try {
    // Dirs are managed manually (configured above), not by the builder.
    cluster=new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    nn=cluster.getNameNode();
    assertNotNull(nn);
    LOG.info("--starting SecondNN");
    sn=new SecondaryNameNode(config);
    assertNotNull(sn);
    LOG.info("--doing checkpoint");
    sn.doCheckpoint();
    LOG.info("--done checkpoint");
    // Inspect the NN's storage dir 0 (image + edits combined) and log
    // the sizes of the image/edits files for comparison.
    FSImage image=nn.getFSImage();
    StorageDirectory sd=image.getStorage().getStorageDir(0);
    assertEquals(sd.getStorageDirType(),NameNodeDirType.IMAGE_AND_EDITS);
    image.getStorage();
    File imf=NNStorage.getStorageFile(sd,NameNodeFile.IMAGE,0);
    image.getStorage();
    File edf=NNStorage.getStorageFile(sd,NameNodeFile.EDITS,0);
    LOG.info("--image file " + imf.getAbsolutePath() + "; len = "+ imf.length());
    LOG.info("--edits file " + edf.getAbsolutePath() + "; len = "+ edf.length());
    // The SNN's checkpoint image must live in its own separate dirs and
    // match the NN's file lengths.
    FSImage chkpImage=sn.getFSImage();
    verifyDifferentDirs(chkpImage,imf.length(),edf.length());
  } catch ( IOException e) {
    // Log and rethrow: the caller (JUnit) must still see the failure.
    fail(StringUtils.stringifyException(e));
    System.err.println("checkpoint failed");
    throw e;
  } finally {
    if (sn != null) sn.shutdown();
    if (cluster != null) cluster.shutdown();
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies namenode validation of the xattr configuration keys:
 * a negative maximum xattr size or per-inode xattr limit must abort
 * startup with an IllegalArgumentException, while a maximum size of 0
 * is accepted and logged as "unlimited".
 */
@Test(timeout=120000) public void testXattrConfiguration() throws Exception {
  Configuration conf=new HdfsConfiguration();
  MiniDFSCluster cluster=null;
  // Case 1: a negative max xattr size must be rejected at startup.
  try {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,-1);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    fail("Expected exception with negative xattr size");
  } catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Cannot set a negative value for the maximum size of an xattr",e);
  } finally {
    // Restore the default so the next case starts from a clean config.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  // Case 2: a negative per-inode xattr limit must also be rejected.
  try {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,-1);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    fail("Expected exception with negative # xattrs per inode");
  } catch ( IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("Cannot set a negative limit on the number of xattrs per inode",e);
  } finally {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
  // Case 3: a max xattr size of 0 means "unlimited" and is logged as such.
  try {
    final LogVerificationAppender appender=new LogVerificationAppender();
    final Logger logger=Logger.getRootLogger();
    logger.addAppender(appender);
    // Sanity check: no "unlimited" message before the cluster is started.
    int count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
    assertEquals("Expected no messages about unlimited xattr size",0,count);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,0);
    cluster=new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    // NOTE(review): the message is expected exactly twice — presumably
    // emitted once during format and once during startup; confirm.
    count=appender.countLinesWithMessage("Maximum size of an xattr: 0 (unlimited)");
    assertEquals("Expected unlimited xattr size",2,count);
  } finally {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier EqualityVerifier 
/**
 * Verify the following scenario:
 * 1. NN restarts.
 * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
 * 3. After reregistration completes, DN will send Heartbeat, followed by
 *    Blockreport.
 * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false, so the
 *    "NumStaleStorages" JMX attribute reads 0.
 *
 * Fix vs. original: removed a redundant trailing {@code return;}.
 *
 * @throws Exception on any failure
 */
@Test(timeout=60000)
public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    dfsCluster.waitActive();
    dfsCluster.restartNameNode(true);
    // Force a heartbeat check so the DN's storage-stale flag is refreshed.
    BlockManagerTestUtil.checkHeartbeat(
        dfsCluster.getNamesystem().getBlockManager());
    // Read the stale-storage count through the FSNamesystemState MBean.
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    Integer numStaleStorages =
        (Integer) (mbs.getAttribute(mxbeanNameFsns, "NumStaleStorages"));
    assertEquals(0, numStaleStorages.intValue());
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that the hosts include list may contain host names: after the
 * namenode restarts, still-alive datanodes should have no trouble
 * re-registering.
 *
 * Fixes vs. original: the hosts list used a raw {@code ArrayList} (now
 * {@code ArrayList<String>}); the catch block called fail() which made the
 * rethrow after it unreachable — the exception is now logged and rethrown.
 */
@Test
public void testNNRestart() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  FileSystem localFileSys;
  Path hostsFile;
  Path excludeFile;
  int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
  // Set up include/exclude host files on the local filesystem.
  localFileSys = FileSystem.getLocal(config);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(localFileSys, excludeFile, null);
  config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  // Put the loopback HOST NAME (not address) into the include file.
  ArrayList<String> list = new ArrayList<String>();
  byte b[] = {127, 0, 0, 1};
  InetAddress inetAddress = InetAddress.getByAddress(b);
  list.add(inetAddress.getHostName());
  writeConfigFile(localFileSys, hostsFile, list);
  int numDatanodes = 1;
  try {
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(numDatanodes)
        .setupHostsFile(true)
        .build();
    cluster.waitActive();
    cluster.restartNameNode();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    assertNotNull(nn);
    assertTrue(cluster.isDataNodeUp());
    // Poll (up to 5 heartbeats) until all datanodes re-register as live.
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be " + numDatanodes,
        numDatanodes, info.length);
  } catch (IOException e) {
    // Log context and rethrow; propagating the exception fails the test.
    System.err.println(StringUtils.stringifyException(e));
    throw e;
  } finally {
    cleanupFile(localFileSys, excludeFile.getParent());
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestStartupOptionUpgrade

InternalCallVerifier EqualityVerifier 
/**
 * Tests the upgrade from version 0.22 to a Federation version with a
 * clusterid supplied: {@code -upgrade -clusterid cid}.
 * Since a pre-Federation layout has no clusterid of its own, the
 * user-given clusterid is expected to be used as-is.
 * @throws Exception on any failure
 */
@Test public void testStartupOptUpgradeFrom22WithCID() throws Exception {
  startOpt.setClusterId("cid");
  // Simulate an on-disk layout from release 0.22 (pre-Federation).
  layoutVersion=Feature.RESERVED_REL22.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
  assertEquals("Clusterid should match with the given clusterid","cid",storage.getClusterID());
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests an upgrade between two Federation versions where the user supplies
 * a WRONG clusterid: {@code -upgrade -clusterid wrong-cid}.
 * The existing on-disk clusterid must be kept and the user-given one ignored.
 * @throws Exception on any failure
 */
@Test public void testStartupOptUpgradeFromFederationWithWrongCID() throws Exception {
  startOpt.setClusterId("wrong-cid");
  // The storage already carries a clusterid from the previous deployment.
  storage.setClusterID("currentcid");
  layoutVersion=Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
  assertEquals("Clusterid should match with the existing one","currentcid",storage.getClusterID());
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests an upgrade between two Federation versions with NO clusterid
 * supplied on the command line: {@code -upgrade}.
 * The existing on-disk clusterid must be reused.
 * @throws Exception on any failure
 */
@Test public void testStartupOptUpgradeFromFederation() throws Exception {
  storage.setClusterID("currentcid");
  layoutVersion=Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
  assertEquals("Clusterid should match with the existing one","currentcid",storage.getClusterID());
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests an upgrade between two Federation versions where the user supplies
 * a clusterid MATCHING the existing one: {@code -upgrade -clusterid currentcid}.
 * The existing clusterid is kept (the user-given value is ignored either way).
 * @throws Exception on any failure
 */
@Test public void testStartupOptUpgradeFromFederationWithCID() throws Exception {
  startOpt.setClusterId("currentcid");
  storage.setClusterID("currentcid");
  layoutVersion=Feature.FEDERATION.getInfo().getLayoutVersion();
  storage.processStartupOptionsForUpgrade(startOpt,layoutVersion);
  assertEquals("Clusterid should match with the existing one","currentcid",storage.getClusterID());
}

Class: org.apache.hadoop.hdfs.server.namenode.TestStartupProgressServlet

NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the servlet's JSON response while startup is mid-way:
 * LoadingFsImage complete, LoadingEdits half done (overall 37.5%),
 * SavingCheckpoint and SafeMode still pending.
 */
@Test public void testRunningState() throws Exception {
  setStartupProgressForRunningState(startupProgress);
  String respBody=doGetAndReturnResponseBody();
  assertNotNull(respBody);
  // Build the expected JSON structure phase by phase; the ordering of
  // puts matters because the comparison is on serialized JSON text.
  Map expected=ImmutableMap.builder()
      .put("percentComplete",0.375f)
      .put("phases",Arrays.asList(
          // Phase 1: fsimage fully loaded (100/100 inodes).
          ImmutableMap.builder()
              .put("name","LoadingFsImage")
              .put("desc","Loading fsimage")
              .put("status","COMPLETE")
              .put("percentComplete",1.0f)
              .put("steps",Collections.singletonList(
                  ImmutableMap.builder()
                      .put("name","Inodes")
                      .put("desc","inodes")
                      .put("count",100L)
                      .put("total",100L)
                      .put("percentComplete",1.0f)
                      .build()))
              .build(),
          // Phase 2: edits loading in progress (100/200 transactions).
          ImmutableMap.builder()
              .put("name","LoadingEdits")
              .put("desc","Loading edits")
              .put("status","RUNNING")
              .put("percentComplete",0.5f)
              .put("steps",Collections.singletonList(
                  ImmutableMap.builder()
                      .put("count",100L)
                      .put("file","file")
                      .put("size",1000L)
                      .put("total",200L)
                      .put("percentComplete",0.5f)
                      .build()))
              .build(),
          // Phases 3 and 4: not started yet, no steps reported.
          ImmutableMap.builder()
              .put("name","SavingCheckpoint")
              .put("desc","Saving checkpoint")
              .put("status","PENDING")
              .put("percentComplete",0.0f)
              .put("steps",Collections.emptyList())
              .build(),
          ImmutableMap.builder()
              .put("name","SafeMode")
              .put("desc","Safe mode")
              .put("status","PENDING")
              .put("percentComplete",0.0f)
              .put("steps",Collections.emptyList())
              .build()))
      .build();
  assertEquals(JSON.toString(expected),filterJson(respBody));
}

NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the servlet's JSON response before startup progress has begun:
 * every phase PENDING at 0% with no steps, overall 0% complete.
 */
@Test public void testInitialState() throws Exception {
  String respBody=doGetAndReturnResponseBody();
  assertNotNull(respBody);
  // All four phases are pending and empty in the initial state.
  Map expected=ImmutableMap.builder()
      .put("percentComplete",0.0f)
      .put("phases",Arrays.asList(
          ImmutableMap.builder()
              .put("name","LoadingFsImage")
              .put("desc","Loading fsimage")
              .put("status","PENDING")
              .put("percentComplete",0.0f)
              .put("steps",Collections.emptyList())
              .build(),
          ImmutableMap.builder()
              .put("name","LoadingEdits")
              .put("desc","Loading edits")
              .put("status","PENDING")
              .put("percentComplete",0.0f)
              .put("steps",Collections.emptyList())
              .build(),
          ImmutableMap.builder()
              .put("name","SavingCheckpoint")
              .put("desc","Saving checkpoint")
              .put("status","PENDING")
              .put("percentComplete",0.0f)
              .put("steps",Collections.emptyList())
              .build(),
          ImmutableMap.builder()
              .put("name","SafeMode")
              .put("desc","Safe mode")
              .put("status","PENDING")
              .put("percentComplete",0.0f)
              .put("steps",Collections.emptyList())
              .build()))
      .build();
  assertEquals(JSON.toString(expected),filterJson(respBody));
}

NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the servlet's JSON response after startup is finished:
 * all four phases COMPLETE at 100% with their final step counts,
 * overall 100% complete.
 */
@Test public void testFinalState() throws Exception {
  setStartupProgressForFinalState(startupProgress);
  String respBody=doGetAndReturnResponseBody();
  assertNotNull(respBody);
  // Every phase complete; each step reports count == total at 100%.
  Map expected=ImmutableMap.builder()
      .put("percentComplete",1.0f)
      .put("phases",Arrays.asList(
          ImmutableMap.builder()
              .put("name","LoadingFsImage")
              .put("desc","Loading fsimage")
              .put("status","COMPLETE")
              .put("percentComplete",1.0f)
              .put("steps",Collections.singletonList(
                  ImmutableMap.builder()
                      .put("name","Inodes")
                      .put("desc","inodes")
                      .put("count",100L)
                      .put("total",100L)
                      .put("percentComplete",1.0f)
                      .build()))
              .build(),
          ImmutableMap.builder()
              .put("name","LoadingEdits")
              .put("desc","Loading edits")
              .put("status","COMPLETE")
              .put("percentComplete",1.0f)
              .put("steps",Collections.singletonList(
                  ImmutableMap.builder()
                      .put("count",200L)
                      .put("file","file")
                      .put("size",1000L)
                      .put("total",200L)
                      .put("percentComplete",1.0f)
                      .build()))
              .build(),
          ImmutableMap.builder()
              .put("name","SavingCheckpoint")
              .put("desc","Saving checkpoint")
              .put("status","COMPLETE")
              .put("percentComplete",1.0f)
              .put("steps",Collections.singletonList(
                  ImmutableMap.builder()
                      .put("name","Inodes")
                      .put("desc","inodes")
                      .put("count",300L)
                      .put("total",300L)
                      .put("percentComplete",1.0f)
                      .build()))
              .build(),
          ImmutableMap.builder()
              .put("name","SafeMode")
              .put("desc","Safe mode")
              .put("status","COMPLETE")
              .put("percentComplete",1.0f)
              .put("steps",Collections.singletonList(
                  ImmutableMap.builder()
                      .put("name","AwaitingReportedBlocks")
                      .put("desc","awaiting reported blocks")
                      .put("count",400L)
                      .put("total",400L)
                      .put("percentComplete",1.0f)
                      .build()))
              .build()))
      .build();
  assertEquals(JSON.toString(expected),filterJson(respBody));
}

Class: org.apache.hadoop.hdfs.server.namenode.TestStorageRestore

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests the {@code dfsadmin -restoreFailedStorage} command: the flag starts
 * out true, can be toggled to false and back to true, and the "check"
 * sub-command reports the current value in its output.
 *
 * Fixes vs. original: {@code assertEquals(restore,true)} had the actual
 * value first and is clearer as assertTrue; {@code commandOutput.trim()}
 * discarded its result (String is immutable) — the trimmed value is now
 * actually used.
 *
 * @throws Exception on any failure
 */
@Test
public void testDfsAdminCmd() throws Exception {
  cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(2)
      .manageNameDfsDirs(false)
      .build();
  cluster.waitActive();
  try {
    FSImage fsi = cluster.getNameNode().getFSImage();
    // The flag defaults to true.
    boolean restore = fsi.getStorage().getRestoreFailedStorage();
    LOG.info("Restore is " + restore);
    assertTrue("restoreFailedStorage should default to true", restore);
    // Toggle the flag off via dfsadmin.
    String cmd = "-fs NAMENODE -restoreFailedStorage false";
    String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
    CommandExecutor executor =
        new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
    executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertFalse("After set true call restore is " + restore, restore);
    // Toggle the flag back on.
    cmd = "-fs NAMENODE -restoreFailedStorage true";
    executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertTrue("After set false call restore is " + restore, restore);
    // "check" must leave the flag untouched and report the current value.
    cmd = "-fs NAMENODE -restoreFailedStorage check";
    CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
    restore = fsi.getStorage().getRestoreFailedStorage();
    assertTrue("After check call restore is " + restore, restore);
    String commandOutput = cmdResult.getCommandOutput().trim();
    assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.TestTransferFsImage

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that an fsimage upload to an unresponsive servlet times out
 * with a SocketTimeoutException ("Read timed out") rather than hanging.
 */
@Test(timeout=10000) public void testImageUploadTimeout() throws Exception {
  Configuration conf=new HdfsConfiguration();
  NNStorage mockStorage=Mockito.mock(NNStorage.class);
  // TestImageTransferServlet never responds, forcing the read timeout.
  HttpServer2 testServer=HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("ImageTransfer",ImageServlet.PATH_SPEC,TestImageTransferServlet.class);
    testServer.start();
    URL serverURL=HttpServerFunctionalTest.getServerURL(testServer);
    // 2s socket timeout, well under the 10s test timeout.
    TransferFsImage.timeout=2000;
    // Create a small real image file for the mocked storage to hand out.
    File tmpDir=new File(new FileSystemTestHelper().getTestRootDir());
    tmpDir.mkdirs();
    File mockImageFile=File.createTempFile("image","",tmpDir);
    FileOutputStream imageFile=new FileOutputStream(mockImageFile);
    imageFile.write("data".getBytes());
    imageFile.close();
    Mockito.when(mockStorage.findImageFile(Mockito.any(NameNodeFile.class),Mockito.anyLong())).thenReturn(mockImageFile);
    Mockito.when(mockStorage.toColonSeparatedString()).thenReturn("storage:info:string");
    try {
      TransferFsImage.uploadImageFromStorage(serverURL,conf,mockStorage,NameNodeFile.IMAGE,1L);
      fail("TransferImage Should fail with timeout");
    } catch ( SocketTimeoutException e) {
      assertEquals("Upload should timeout","Read timed out",e.getMessage());
    }
  } finally {
    testServer.stop();
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that downloading an image from an unresponsive servlet times
 * out with a SocketTimeoutException ("Read timed out") rather than hanging.
 */
@Test(timeout=5000) public void testGetImageTimeout() throws Exception {
  // TestImageTransferServlet never responds, forcing the read timeout.
  HttpServer2 testServer=HttpServerFunctionalTest.createServer("hdfs");
  try {
    testServer.addServlet("ImageTransfer",ImageServlet.PATH_SPEC,TestImageTransferServlet.class);
    testServer.start();
    URL serverURL=HttpServerFunctionalTest.getServerURL(testServer);
    // 2s socket timeout, well under the 5s test timeout.
    TransferFsImage.timeout=2000;
    try {
      TransferFsImage.getFileClient(serverURL,"txid=1",null,null,false);
      fail("TransferImage Should fail with timeout");
    } catch ( SocketTimeoutException e) {
      assertEquals("Read should timeout","Read timed out",e.getMessage());
    }
  } finally {
    if (testServer != null) {
      testServer.stop();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestBootstrapStandby

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for the case where the shared edits dir doesn't have all of the
 * recent edit logs: bootstrapStandby must fail with
 * ERR_CODE_LOGS_UNAVAILABLE and log a FATAL message about the missing
 * transaction range.
 */
@Test public void testSharedEditsMissingLogs() throws Exception {
  removeStandbyNameDirs();
  // Roll the edit log so transactions 1-2 land in a finalized segment.
  CheckpointSignature sig=nn0.getRpcServer().rollEditLog();
  assertEquals(3,sig.getCurSegmentTxId());
  // Delete the finalized segment [1,2] out of the shared edits dir.
  URI editsUri=cluster.getSharedEditsDir(0,1);
  File editsDir=new File(editsUri);
  File editsSegment=new File(new File(editsDir,"current"),NNStorage.getFinalizedEditsFileName(1,2));
  GenericTestUtils.assertExists(editsSegment);
  assertTrue(editsSegment.delete());
  // Capture BootstrapStandby's log output to assert on the FATAL message.
  LogCapturer logs=GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(BootstrapStandby.class));
  try {
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE,rc);
  } finally {
    logs.stopCapturing();
  }
  GenericTestUtils.assertMatches(logs.getOutput(),"FATAL.*Unable to read transaction ids 1-3 from the configured shared");
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test for the base success case: the primary NN hasn't made any
 * checkpoints, so bootstrapStandby copies the fsimage_0 file over,
 * after which the standby NN can start.
 */
@Test public void testSuccessfulBaseCase() throws Exception {
  removeStandbyNameDirs();
  // With its name dirs gone, the standby must refuse to start.
  try {
    cluster.restartNameNode(1);
    fail("Did not throw");
  } catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("storage directory does not exist or is not accessible",ioe);
  }
  // Bootstrap the standby from the primary; exit code 0 means success.
  int rc=BootstrapStandby.run(new String[]{"-nonInteractive"},cluster.getConfiguration(1));
  assertEquals(0,rc);
  // The standby should now hold checkpoint txid 0 and match the primary's files.
  FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
  // And it must be able to restart on the bootstrapped directories.
  cluster.restartNameNode(1);
}

InternalCallVerifier EqualityVerifier 
/**
 * Test that, even if the other node is not active, we are able to
 * bootstrap standby from it.
 */
@Test(timeout=30000)
public void testOtherNodeNotActive() throws Exception {
  // Put the source namenode into the standby state before bootstrapping.
  cluster.transitionToStandby(0);
  String[] bootstrapArgs = {"-force"};
  int exitCode = BootstrapStandby.run(bootstrapArgs, cluster.getConfiguration(1));
  // Exit code 0 indicates the bootstrap succeeded despite the standby source.
  assertEquals(0, exitCode);
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies bootstrapStandby behavior when the standby's storage
 * directories already exist: non-interactive mode must refuse with
 * ERR_CODE_ALREADY_FORMATTED, while -force overwrites and succeeds.
 */
@Test
public void testStandbyDirsAlreadyExist() throws Exception {
  // Without -force, existing directories must not be overwritten.
  int exitCode = BootstrapStandby.run(
      new String[]{"-nonInteractive"}, cluster.getConfiguration(1));
  assertEquals(BootstrapStandby.ERR_CODE_ALREADY_FORMATTED, exitCode);
  // With -force, the existing directories are reformatted.
  exitCode = BootstrapStandby.run(
      new String[]{"-force"}, cluster.getConfiguration(1));
  assertEquals(0, exitCode);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test for downloading a checkpoint made at a later checkpoint from the
 * active: after two log rolls and a saveNamespace, the bootstrap must
 * fetch the checkpoint at txid 6 rather than the initial image.
 */
@Test public void testDownloadingLaterCheckpoint() throws Exception {
  // Two rolls plus enter/save/leave safemode advance the txid to 6.
  nn0.getRpcServer().rollEditLog();
  nn0.getRpcServer().rollEditLog();
  NameNodeAdapter.enterSafeMode(nn0,false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  long expectedCheckpointTxId=NameNodeAdapter.getNamesystem(nn0).getFSImage().getMostRecentCheckpointTxId();
  assertEquals(6,expectedCheckpointTxId);
  int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
  assertEquals(0,rc);
  // The standby must now hold the later checkpoint and match the active's files.
  FSImageTestUtil.assertNNHasCheckpoints(cluster,1,ImmutableList.of((int)expectedCheckpointTxId));
  FSImageTestUtil.assertNNFilesMatch(cluster);
  cluster.restartNameNode(1);
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestBootstrapStandbyWithQJM

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * BootstrapStandby when the existing (source) NN is in the standby state:
 * the bootstrap must still succeed, leaving a checkpoint at txid 0 and
 * files matching the source.
 */
@Test
public void testBootstrapStandbyWithStandbyNN() throws Exception {
  // Make NN0 standby, then take down NN1 so it can be bootstrapped.
  cluster.transitionToStandby(0);
  Configuration standbyConf = cluster.getConfiguration(1);
  cluster.shutdownNameNode(1);
  int exitCode = BootstrapStandby.run(new String[]{"-force"}, standbyConf);
  assertEquals(0, exitCode);
  // NN1 should now hold checkpoint txid 0 and mirror NN0's files.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * BootstrapStandby when the existing (source) NN is active: the bootstrap
 * must succeed, leaving a checkpoint at txid 0 and files matching the source.
 */
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
  // Make NN0 active, then take down NN1 so it can be bootstrapped.
  cluster.transitionToActive(0);
  Configuration standbyConf = cluster.getConfiguration(1);
  cluster.shutdownNameNode(1);
  int exitCode = BootstrapStandby.run(new String[]{"-force"}, standbyConf);
  assertEquals(0, exitCode);
  // NN1 should now hold checkpoint txid 0 and mirror NN0's files.
  FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0));
  FSImageTestUtil.assertNNFilesMatch(cluster);
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDFSUpgradeWithHA

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Ensure that an admin cannot finalize an HA upgrade without at least one
 * NN being active: with both NNs in standby, the finalize command must
 * fail with "Cannot finalize with no NameNode active".
 */
@Test public void testCannotFinalizeIfNoActive() throws IOException, URISyntaxException {
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    File sharedDir=new File(cluster.getSharedEditsDir(0,1));
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir,false);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // Start the upgrade on NN0 only; NN1 stays down for now.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    // Upgrade creates "previous" dirs on NN0 and the shared dir, not NN1.
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkPreviousDirExistence(sharedDir,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    // Restart NN0 normally and verify the FS keeps working mid-upgrade.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0,false);
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    // Bootstrap NN1 from NN0 and fail over to it.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    assertCTimesEqual(cluster);
    // Now make BOTH NNs standby: finalize must be rejected.
    cluster.transitionToStandby(1);
    try {
      runFinalizeCommand(cluster);
      fail("Should not have been able to finalize upgrade with no NN active");
    } catch ( IOException ioe) {
      GenericTestUtils.assertExceptionContains("Cannot finalize with no NameNode active",ioe);
    }
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure that an HA NN with NFS-based (shared-dir) HA can successfully
 * start and upgrade: the upgrade creates "previous" dirs on the upgrading
 * NN and the shared dir, the FS stays writable throughout, and the second
 * NN can be bootstrapped and take over.
 */
@Test public void testNfsUpgrade() throws IOException, URISyntaxException {
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    File sharedDir=new File(cluster.getSharedEditsDir(0,1));
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir,false);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // Start the upgrade on NN0 only; NN1 stays down.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    // Upgrade creates "previous" dirs on NN0 and the shared dir, not NN1.
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkPreviousDirExistence(sharedDir,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    // Restart NN0 normally and keep writing mid-upgrade.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0,false);
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    // Bootstrap NN1 from NN0 and fail over to it.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that finalizing an upgrade on a JournalNode-backed HA cluster
 * removes the "previous" directories on both the NNs and the JNs.
 */
@Test public void testFinalizeWithJournalNodes() throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster=null;
  FileSystem fs=null;
  try {
    Builder builder=new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0);
    qjCluster=builder.build();
    MiniDFSCluster cluster=qjCluster.getDfsCluster();
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkJnPreviousDirExistence(qjCluster,false);
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // Start the upgrade on NN0 only; NN1 stays down.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    // Upgrade creates "previous" dirs on NN0 and the JNs, not NN1.
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    // Bring NN1 back via bootstrapStandby, then finalize.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    runFinalizeCommand(cluster);
    // Finalize must remove all "previous" dirs (NNs and JNs alike).
    checkClusterPreviousDirExistence(cluster,false);
    checkJnPreviousDirExistence(qjCluster,false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure that an HA NN can successfully upgrade when configured using
 * JournalNodes: "previous" dirs appear on the upgrading NN and the JNs,
 * the FS stays writable throughout, and the second NN can be bootstrapped
 * and take over.
 */
@Test public void testUpgradeWithJournalNodes() throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster=null;
  FileSystem fs=null;
  try {
    Builder builder=new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0);
    qjCluster=builder.build();
    MiniDFSCluster cluster=qjCluster.getDfsCluster();
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkJnPreviousDirExistence(qjCluster,false);
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // Start the upgrade on NN0 only; NN1 stays down.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    // Upgrade creates "previous" dirs on NN0 and the JNs, not NN1.
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    // Restart NN0 normally and keep writing mid-upgrade.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0,false);
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    // Bootstrap NN1 from NN0 and fail over to it.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test rollback with an NFS shared edits dir: after an upgrade has created
 * "previous" dirs on NN0 and the shared dir, NameNode.doRollback must
 * remove them.
 */
@Test public void testRollbackWithNfs() throws Exception {
  MiniDFSCluster cluster=null;
  FileSystem fs=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    File sharedDir=new File(cluster.getSharedEditsDir(0,1));
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir,false);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // Start the upgrade on NN0 only; NN1 stays down.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    // Upgrade creates "previous" dirs on NN0 and the shared dir, not NN1.
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkPreviousDirExistence(sharedDir,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    // Bootstrap NN1 and verify the upgrade state is unchanged by it.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkPreviousDirExistence(sharedDir,true);
    assertCTimesEqual(cluster);
    // Shut everything down and roll back offline using NN0's name dirs.
    Collection nn1NameDirs=cluster.getNameDirs(0);
    cluster.shutdown();
    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf,false);
    // Rollback must remove the "previous" dirs on NN0 and the shared dir.
    checkNnPreviousDirExistence(cluster,0,false);
    checkPreviousDirExistence(sharedDir,false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test rollback on a JournalNode-backed HA cluster: after an upgrade has
 * created "previous" dirs on NN0 and the JNs, NameNode.doRollback must
 * remove them.
 */
@Test public void testRollbackWithJournalNodes() throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster=null;
  FileSystem fs=null;
  try {
    Builder builder=new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0);
    qjCluster=builder.build();
    MiniDFSCluster cluster=qjCluster.getDfsCluster();
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    checkJnPreviousDirExistence(qjCluster,false);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // Start the upgrade on NN0 only; NN1 stays down.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    // Upgrade creates "previous" dirs on NN0 and the JNs, not NN1.
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    assertTrue(fs.mkdirs(new Path("/foo2")));
    // Bootstrap NN1 and verify the upgrade state is unchanged by it.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    assertCTimesEqual(cluster);
    // Shut everything down and roll back offline using NN0's name dirs.
    Collection nn1NameDirs=cluster.getNameDirs(0);
    cluster.shutdown();
    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,Joiner.on(",").join(nn1NameDirs));
    NameNode.doRollback(conf,false);
    // Rollback must remove the "previous" dirs on NN0 and the JNs.
    checkNnPreviousDirExistence(cluster,0,false);
    checkJnPreviousDirExistence(qjCluster,false);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure that even if the NN which initiated the upgrade is in the
 * standby state, we're still allowed to finalize from the other (now
 * active) NN, and that finalize removes all "previous" dirs.
 */
@Test public void testFinalizeFromSecondNameNodeWithJournalNodes() throws IOException, URISyntaxException {
  MiniQJMHACluster qjCluster=null;
  FileSystem fs=null;
  try {
    Builder builder=new MiniQJMHACluster.Builder(conf);
    builder.getDfsBuilder().numDataNodes(0);
    qjCluster=builder.build();
    MiniDFSCluster cluster=qjCluster.getDfsCluster();
    // Sanity: no "previous" dirs exist before the upgrade begins.
    checkJnPreviousDirExistence(qjCluster,false);
    checkClusterPreviousDirExistence(cluster,false);
    assertCTimesEqual(cluster);
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    // NN0 initiates the upgrade; NN1 stays down for now.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0,false);
    checkNnPreviousDirExistence(cluster,0,true);
    checkNnPreviousDirExistence(cluster,1,false);
    checkJnPreviousDirExistence(qjCluster,true);
    // Bootstrap NN1, then fail over so the INITIATOR (NN0) is standby.
    int rc=BootstrapStandby.run(new String[]{"-force"},cluster.getConfiguration(1));
    assertEquals(0,rc);
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    // Finalizing from NN1 must still succeed and clean up everywhere.
    runFinalizeCommand(cluster);
    checkClusterPreviousDirExistence(cluster,false);
    checkJnPreviousDirExistence(qjCluster,false);
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (qjCluster != null) {
      qjCluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDFSZKFailoverController

InternalCallVerifier EqualityVerifier 
/**
 * Verifies a manual failover driven through DFSHAAdmin: nn1 -> nn2 and
 * back again, checking the HA state of both namenodes after each step.
 */
@Test(timeout=30000)
public void testManualFailoverWithDFSHAAdmin() throws Exception {
  DFSHAAdmin haAdmin = new DFSHAAdmin();
  haAdmin.setConf(conf);
  // Fail over from nn1 to nn2 and confirm the roles swapped.
  assertEquals(0, haAdmin.run(new String[]{"-failover", "nn1", "nn2"}));
  waitForHAState(0, HAServiceState.STANDBY);
  waitForHAState(1, HAServiceState.ACTIVE);
  // Fail back from nn2 to nn1 and confirm the roles swapped again.
  assertEquals(0, haAdmin.run(new String[]{"-failover", "nn2", "nn1"}));
  waitForHAState(0, HAServiceState.ACTIVE);
  waitForHAState(1, HAServiceState.STANDBY);
}

TestInitializer InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Sets up a two-NN HA mini cluster for nameservice "ns1" with automatic
 * (ZKFC) failover, formats ZK, starts both ZKFC threads, and waits until
 * NN0 is active and both ZKFCs report their NN as healthy.
 */
@Before
public void setup() throws Exception {
  conf=new Configuration();
  conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1",hostPort);
  // Use a fencer that always succeeds so failovers never stall on fencing.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,AlwaysSucceedFencer.class.getName());
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,true);
  // Drop idle IPC connections immediately so dead endpoints are noticed fast.
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,0);
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1",10023);
  conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",10024);
  MiniDFSNNTopology topology=new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021)).addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
  cluster=new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
  cluster.waitActive();
  ctx=new TestContext();
  // Start the ZKFC for NN0 first; after formatting ZK it should win the
  // election, making NN0 active.
  ctx.addThread(thr1=new ZKFCThread(ctx,0));
  assertEquals(0,thr1.zkfc.run(new String[]{"-formatZK"}));
  thr1.start();
  waitForHAState(0,HAServiceState.ACTIVE);
  ctx.addThread(thr2=new ZKFCThread(ctx,1));
  thr2.start();
  // Wait for both ZKFCs to consider their NN healthy before any test runs.
  ZKFCTestUtil.waitForHealthState(thr1.zkfc,HealthMonitor.State.SERVICE_HEALTHY,ctx);
  ZKFCTestUtil.waitForHealthState(thr2.zkfc,HealthMonitor.State.SERVICE_HEALTHY,ctx);
  fs=HATestUtil.configureFailoverFs(cluster,conf);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that automatic failover is triggered by shutting the active NN down,
 * and that the cluster fails back when the other NN is shut down in turn.
 */
@Test(timeout=60000)
public void testFailoverAndBackOnNNShutdown() throws Exception {
  Path p1=new Path("/dir1");
  Path p2=new Path("/dir2");
  // Write through NN0 (active), then kill NN0 to trigger automatic failover.
  fs.mkdirs(p1);
  cluster.shutdownNameNode(0);
  // The client should transparently fail over for both reads and writes.
  assertTrue(fs.exists(p1));
  fs.mkdirs(p2);
  // NN0 (thr1's local target) must be the service that got fenced.
  // Fix: JUnit assertEquals takes the expected value first; the original
  // had actual/expected swapped, which garbles failure messages.
  assertEquals(thr1.zkfc.getLocalTarget().getAddress(),AlwaysSucceedFencer.getLastFencedService().getAddress());
  // Restart NN0: it should rejoin as standby, with all data still visible.
  cluster.restartNameNode(0);
  waitForHAState(0,HAServiceState.STANDBY);
  assertTrue(fs.exists(p1));
  assertTrue(fs.exists(p2));
  // Now kill NN1; NN0 should take over as active.
  cluster.shutdownNameNode(1);
  waitForHAState(0,HAServiceState.ACTIVE);
  assertTrue(fs.exists(p1));
  assertTrue(fs.exists(p2));
  assertEquals(thr2.zkfc.getLocalTarget().getAddress(),AlwaysSucceedFencer.getLastFencedService().getAddress());
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDNFencing

InternalCallVerifier EqualityVerifier 
/**
 * Another regression test for HDFS-2742. This tests the following sequence:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - The block report is delayed in reaching the standby.
 * - The file is closed.
 * - The standby processes the OP_ADD and OP_CLOSE operations before
 *   the RBW block report arrives.
 * - The standby should not mark the block as corrupt.
 */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
  final CountDownLatch brFinished=new CountDownLatch(1);
  // Delays the spied blockReport() call and counts down once it has been
  // fully processed, so the test can wait for the delayed BR to finish.
  DelayAnswer delayer=new GenericTestUtils.DelayAnswer(LOG){
    @Override protected Object passThrough( InvocationOnMock invocation) throws Throwable {
      try {
        return super.passThrough(invocation);
      }
      finally {
        brFinished.countDown();
      }
    }
  };
  FSDataOutputStream out=fs.create(TEST_FILE_PATH);
  try {
    // Write and hflush so the DN holds the block in RBW state.
    AppendTestUtil.write(out,0,10);
    out.hflush();
    // Intercept the DN's block report to the standby (nn2) and hold it.
    DataNode dn=cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy=DataNodeTestUtils.spyOnBposToNN(dn,nn2);
    Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.anyObject(),Mockito.anyString(),Mockito.anyObject());
    dn.scheduleAllBlockReport(0);
    delayer.waitForCall();
  }
  finally {
    // Closing the stream produces the OP_CLOSE edit while the BR is held.
    IOUtils.closeStream(out);
  }
  // Fail over so nn2 replays OP_ADD/OP_CLOSE, then release the held BR.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  delayer.proceed();
  brFinished.await();
  // Neither NN should have marked any replica corrupt, and the file must
  // still be readable.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
  DFSTestUtil.readFile(fs,TEST_FILE_PATH);
}

InternalCallVerifier EqualityVerifier 
/**
 * Test that, when a block is re-opened for append, the related
 * datanode messages are correctly queued by the SBN because
 * they have future states and genstamps.
 */
@Test
public void testQueueingWithAppend() throws Exception {
  // Running tally of how many DN messages the standby is expected to have
  // queued; each step below is expected to add one message per DN.
  int numQueued=0;
  int numDN=cluster.getDataNodes().size();
  FSDataOutputStream out=fs.create(TEST_FILE_PATH);
  try {
    AppendTestUtil.write(out,0,10);
    out.hflush();
    numQueued+=numDN;
  }
  finally {
    IOUtils.closeStream(out);
    numQueued+=numDN;
  }
  cluster.triggerBlockReports();
  numQueued+=numDN;
  try {
    // Re-opening for append bumps the genstamp, producing more
    // future-state messages the standby must queue rather than apply.
    out=fs.append(TEST_FILE_PATH);
    AppendTestUtil.write(out,10,10);
    numQueued+=numDN;
  }
  finally {
    IOUtils.closeStream(out);
    numQueued+=numDN;
  }
  cluster.triggerBlockReports();
  numQueued+=numDN;
  assertEquals(numQueued,cluster.getNameNode(1).getNamesystem().getPendingDataNodeMessageCount());
  // After failover the queued messages are applied; nothing should be
  // marked corrupt and the file contents must be intact (20 bytes).
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
  AppendTestUtil.check(fs,TEST_FILE_PATH,20);
}

InternalCallVerifier EqualityVerifier 
/**
 * Test case which restarts the standby node in such a way that,
 * when it exits safemode, it will want to invalidate a bunch
 * of over-replicated block replicas. Ensures that if we failover
 * at this point it won't lose data.
 */
@Test
public void testNNClearsCommandsOnFailoverAfterStartup() throws Exception {
  // Create many blocks at replication 3, then drop replication to 1 while
  // NN2 is down, so NN2 restarts wanting to invalidate the extra replicas.
  DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)3,1L);
  banner("Shutting down NN2");
  cluster.shutdownNameNode(1);
  banner("Setting replication to 1, rolling edit log.");
  nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
  nn1.getRpcServer().rollEditLog();
  banner("Starting NN2 again.");
  cluster.restartNameNode(1);
  nn2=cluster.getNameNode(1);
  banner("triggering BRs");
  cluster.triggerBlockReports();
  // Compute invalidation work on both NNs so both hold pending deletions
  // at the moment of failover.
  banner("computing invalidation on nn1");
  BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
  banner("computing invalidation on nn2");
  BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
  banner("Metadata immediately before failover");
  doMetasave(nn2);
  // Fail over without telling NN1: abort its edit logs and hold it in
  // safe mode so it still believes it is active.
  banner("Failing to NN2 but let NN1 continue to think it's active");
  NameNodeAdapter.abortEditLogs(nn1);
  NameNodeAdapter.enterSafeMode(nn1,false);
  cluster.transitionToActive(1);
  assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
  banner("Metadata immediately after failover");
  doMetasave(nn2);
  banner("Triggering heartbeats and block reports so that fencing is completed");
  cluster.triggerHeartbeats();
  cluster.triggerBlockReports();
  banner("Metadata after nodes have all block-reported");
  doMetasave(nn2);
  // Once all DNs have reported, nothing should remain postponed and the
  // pending deletions should drain without under-replicating anything.
  assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
  HATestUtil.waitForNNToIssueDeletions(nn2);
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();
  assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
  banner("Making sure the file is still readable");
  FileSystem fs2=cluster.getFileSystem(1);
  DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
}

InternalCallVerifier EqualityVerifier 
/**
 * Regression test for HDFS-2742. The issue in this bug was:
 * - DN does a block report while file is open. This BR contains
 *   the block in RBW state.
 * - Standby queues the RBW state in PendingDatanodeMessages
 * - Standby processes edit logs during failover. Before fixing
 *   this bug, it was mistakenly applying the RBW reported state
 *   after the block had been completed, causing the block to get
 *   marked corrupt. Instead, we should now be applying the RBW
 *   message on OP_ADD, and then the FINALIZED message on OP_CLOSE.
 */
@Test
public void testBlockReportsWhileFileBeingWritten() throws Exception {
  FSDataOutputStream out=fs.create(TEST_FILE_PATH);
  try {
    // Write and hflush, then force block reports while the file is still
    // open, so the standby queues the RBW replica state.
    AppendTestUtil.write(out,0,10);
    out.hflush();
    cluster.triggerBlockReports();
  }
  finally {
    IOUtils.closeStream(out);
  }
  // Failover makes the standby replay the edits and apply the queued
  // reports; nothing should end up marked corrupt.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
  assertEquals(0,nn1.getNamesystem().getCorruptReplicaBlocks());
  assertEquals(0,nn2.getNamesystem().getCorruptReplicaBlocks());
  DFSTestUtil.readFile(fs,TEST_FILE_PATH);
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests that after a forced failover the new active correctly "fences" the
 * datanodes: replica invalidations queued by the old active are postponed
 * until every DN has re-reported, and no data is lost in the process.
 */
@Test
public void testDnFencing() throws Exception {
  // Create 30 small blocks at replication 3, then have NN1 (active) queue
  // invalidations by dropping replication to 1.
  DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)3,1L);
  ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,TEST_FILE_PATH);
  nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
  BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
  cluster.triggerHeartbeats();
  // Fail over while NN1 still believes it is active.
  banner("Failing to NN2 but let NN1 continue to think it's active");
  NameNodeAdapter.abortEditLogs(nn1);
  NameNodeAdapter.enterSafeMode(nn1,false);
  cluster.transitionToActive(1);
  assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
  banner("NN2 Metadata immediately after failover");
  doMetasave(nn2);
  // All 30 blocks should be postponed until the DNs re-report to NN2.
  assertEquals(30,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  banner("Triggering heartbeats and block reports so that fencing is completed");
  cluster.triggerHeartbeats();
  cluster.triggerBlockReports();
  banner("Metadata after nodes have all block-reported");
  doMetasave(nn2);
  assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  // Now NN2 may safely issue its own invalidations; the file must remain
  // readable and eventually reach true replication 1 on disk.
  BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();
  assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
  banner("Making sure the file is still readable");
  FileSystem fs2=cluster.getFileSystem(1);
  DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
  banner("Waiting for the actual block files to get deleted from DNs.");
  waitForTrueReplication(cluster,block,1);
}

InternalCallVerifier EqualityVerifier 
/**
 * Test case that reduces replication of a file with a lot of blocks
 * and then fails over right after those blocks enter the DN invalidation
 * queues on the active. Ensures that fencing is correct and no replicas
 * are lost.
 */
@Test
public void testNNClearsCommandsOnFailoverWithReplChanges() throws Exception {
  DFSTestUtil.createFile(fs,TEST_FILE_PATH,30 * SMALL_BLOCK,(short)1,1L);
  banner("rolling NN1's edit log, forcing catch-up");
  HATestUtil.waitForStandbyToCatchUp(nn1,nn2);
  // Raise replication to 2 and busy-wait until all re-replication work has
  // been computed and completed on the DNs.
  nn1.getRpcServer().setReplication(TEST_FILE,(short)2);
  while (BlockManagerTestUtil.getComputedDatanodeWork(nn1.getNamesystem().getBlockManager()) > 0) {
    LOG.info("Getting more replication work computed");
  }
  BlockManager bm1=nn1.getNamesystem().getBlockManager();
  while (bm1.getPendingReplicationBlocksCount() > 0) {
    BlockManagerTestUtil.updateState(bm1);
    cluster.triggerHeartbeats();
    Thread.sleep(1000);
  }
  banner("triggering BRs");
  cluster.triggerBlockReports();
  // Drop back to replication 1 so both NNs compute invalidations for the
  // now-extra replicas right before the failover.
  nn1.getRpcServer().setReplication(TEST_FILE,(short)1);
  banner("computing invalidation on nn1");
  BlockManagerTestUtil.computeInvalidationWork(nn1.getNamesystem().getBlockManager());
  doMetasave(nn1);
  banner("computing invalidation on nn2");
  BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
  doMetasave(nn2);
  banner("Metadata immediately before failover");
  doMetasave(nn2);
  // Fail over while NN1 still believes it is active.
  banner("Failing to NN2 but let NN1 continue to think it's active");
  NameNodeAdapter.abortEditLogs(nn1);
  NameNodeAdapter.enterSafeMode(nn1,false);
  BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
  cluster.transitionToActive(1);
  assertEquals(1,nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());
  banner("Metadata immediately after failover");
  doMetasave(nn2);
  banner("Triggering heartbeats and block reports so that fencing is completed");
  cluster.triggerHeartbeats();
  cluster.triggerBlockReports();
  banner("Metadata after nodes have all block-reported");
  doMetasave(nn2);
  // Once fencing completes, pending deletions should drain without
  // leaving anything under-replicated, and the file stays readable.
  assertEquals(0,nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  BlockManagerTestUtil.computeInvalidationWork(nn2.getNamesystem().getBlockManager());
  HATestUtil.waitForNNToIssueDeletions(nn2);
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();
  assertEquals(0,nn2.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0,nn2.getNamesystem().getPendingReplicationBlocks());
  banner("Making sure the file is still readable");
  FileSystem fs2=cluster.getFileSystem(1);
  DFSTestUtil.readFile(fs2,TEST_FILE_PATH);
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestDelegationTokensWithHA

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
 * exception if the URI is a logical URI. This bug fails the combination of
 * ha + mapred + security.
 */
@Test
public void testDFSGetCanonicalServiceName() throws Exception {
  URI logicalUri=HATestUtil.getLogicalUri(cluster);
  String expectedService=HAUtil.buildTokenServiceForLogicalUri(logicalUri,HdfsConstants.HDFS_URI_SCHEME).toString();
  // The canonical service name must be the logical (HA) token service.
  assertEquals(expectedService,dfs.getCanonicalServiceName());
  // A delegation token fetched through the HA filesystem must carry the
  // same logical service, and be renewable/cancellable through it.
  final String renewer=UserGroupInformation.getCurrentUser().getShortUserName();
  final Token token=getDelegationToken(dfs,renewer);
  assertEquals(expectedService,token.getService().toString());
  token.renew(dfs.getConf());
  token.cancel(dfs.getConf());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Same check as {@code testDFSGetCanonicalServiceName}, but through the
 * {@link AbstractFileSystem} API: the canonical service name and the
 * delegation token's service must both be the logical HA token service.
 */
@Test
public void testHdfsGetCanonicalServiceName() throws Exception {
  Configuration clientConf=dfs.getConf();
  URI logicalUri=HATestUtil.getLogicalUri(cluster);
  AbstractFileSystem afs=AbstractFileSystem.createFileSystem(logicalUri,clientConf);
  String expectedService=HAUtil.buildTokenServiceForLogicalUri(logicalUri,HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(expectedService,afs.getCanonicalServiceName());
  // Fetch a token for the current user and verify its service, then make
  // sure it can be renewed and cancelled.
  Token token=afs.getDelegationTokens(UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
  assertEquals(expectedService,token.getService().toString());
  token.renew(clientConf);
  token.cancel(clientConf);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that HAUtil.cloneDelegationTokenForLogicalUri() produces, for a
 * token whose service is the logical HA URI, one selectable clone per
 * physical NN address, and that re-cloning works after the token-service
 * naming scheme is switched from ip-based to hostname-based.
 */
@Test
public void testHAUtilClonesDelegationTokens() throws Exception {
  final Token token=getDelegationToken(fs,"JobTracker");
  UserGroupInformation ugi=UserGroupInformation.createRemoteUser("test");
  URI haUri=new URI("hdfs://my-ha-uri/");
  // Give the token the logical (HA) service name, then clone it for each
  // physical NN address.
  token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri,HdfsConstants.HDFS_URI_SCHEME));
  ugi.addToken(token);
  Collection nnAddrs=new HashSet();
  nnAddrs.add(new InetSocketAddress("localhost",nn0.getNameNodeAddress().getPort()));
  nnAddrs.add(new InetSocketAddress("localhost",nn1.getNameNodeAddress().getPort()));
  HAUtil.cloneDelegationTokenForLogicalUri(ugi,haUri,nnAddrs);
  // Expect the logical token plus one clone per NN address.
  Collection> tokens=ugi.getTokens();
  assertEquals(3,tokens.size());
  LOG.info("Tokens:\n" + Joiner.on("\n").join(tokens));
  DelegationTokenSelector dts=new DelegationTokenSelector();
  // Each clone must be selectable by its per-address service and carry the
  // same identifier and password as the original.
  for ( InetSocketAddress addr : nnAddrs) {
    Text ipcDtService=SecurityUtil.buildTokenService(addr);
    Token token2=dts.selectToken(ipcDtService,ugi.getTokens());
    assertNotNull(token2);
    assertArrayEquals(token.getIdentifier(),token2.getIdentifier());
    assertArrayEquals(token.getPassword(),token2.getPassword());
  }
  // After switching to hostname-based token services the old clones no
  // longer match...
  SecurityUtilTestHelper.setTokenServiceUseIp(false);
  for ( InetSocketAddress addr : nnAddrs) {
    Text ipcDtService=SecurityUtil.buildTokenService(addr);
    Token token2=dts.selectToken(ipcDtService,ugi.getTokens());
    assertNull(token2);
  }
  // ...until the token is re-cloned under the new naming scheme.
  HAUtil.cloneDelegationTokenForLogicalUri(ugi,haUri,nnAddrs);
  for ( InetSocketAddress addr : nnAddrs) {
    Text ipcDtService=SecurityUtil.buildTokenService(addr);
    Token token2=dts.selectToken(ipcDtService,ugi.getTokens());
    assertNotNull(token2);
    assertArrayEquals(token.getIdentifier(),token2.getIdentifier());
    assertArrayEquals(token.getPassword(),token2.getPassword());
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestFailoverWithBlockTokensEnabled

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that reads fail when every block token returned by the NN has
 * been tampered with (expiry rewritten to ~now), i.e. that block tokens
 * are actually validated on the read path.
 */
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException, URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
  // Sanity check: the file is readable with valid tokens.
  DFSTestUtil.writeFile(fs,TEST_PATH,TEST_DATA);
  assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH));
  DFSClient dfsClient=DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
  DFSClient spyDfsClient=Mockito.spy(dfsClient);
  // Intercept getLocatedBlocks() and replace every block token with one
  // whose expiry is Time.now()+10ms, so it is invalid by the time it is
  // presented to a DN.
  Mockito.doAnswer(new Answer(){
    @Override public LocatedBlocks answer( InvocationOnMock arg0) throws Throwable {
      LocatedBlocks locatedBlocks=(LocatedBlocks)arg0.callRealMethod();
      for ( LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        Token token=lb.getBlockToken();
        BlockTokenIdentifier id=lb.getBlockToken().decodeIdentifier();
        id.setExpiryDate(Time.now() + 10);
        // Rebuild the token around the modified identifier; the password
        // no longer matches the identifier, so DNs must reject it.
        Token newToken=new Token(id.getBytes(),token.getPassword(),token.getKind(),token.getService());
        lb.setBlockToken(newToken);
      }
      return locatedBlocks;
    }
  }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),Mockito.anyLong(),Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem)fs,spyDfsClient);
  try {
    assertEquals(TEST_DATA,DFSTestUtil.readFile(fs,TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  }
  catch ( IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block",ioe);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestFailureOfSharedDir

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that marking the shared edits dir as being "required" causes the NN to
 * fail if that dir can't be accessed.
 */
@Test
public void testFailureOfSharedDir() throws Exception {
  Configuration conf=new Configuration();
  // Check NN resources every 2s so the unwritable dir is noticed quickly.
  conf.setLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,2000);
  MiniDFSCluster cluster=null;
  File sharedEditsDir=null;
  try {
    cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).checkExitOnShutdown(false).build();
    cluster.waitActive();
    cluster.transitionToActive(0);
    FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
    assertTrue(fs.mkdirs(new Path("/test1")));
    // Remove write permission from the shared edits dir to simulate loss
    // of access, then wait at least one full resource-check cycle.
    URI sharedEditsUri=cluster.getSharedEditsDir(0,1);
    sharedEditsDir=new File(sharedEditsUri);
    assertEquals(0,FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"-w",true));
    Thread.sleep(conf.getLong(DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT) * 2);
    // The standby must remain standby and must NOT enter safe mode just
    // because a resource became unavailable.
    NameNode nn1=cluster.getNameNode(1);
    assertTrue(nn1.isStandbyState());
    assertFalse("StandBy NameNode should not go to SafeMode on resource unavailability",nn1.isInSafeMode());
    // The active, however, must abort when it cannot finalize a segment in
    // the required shared journal.
    NameNode nn0=cluster.getNameNode(0);
    try {
      nn0.getRpcServer().rollEditLog();
      fail("Succeeded in rolling edit log despite shared dir being deleted");
    }
    catch ( ExitException ee) {
      GenericTestUtils.assertExceptionContains("finalize log segment 1, 3 failed for required journal",ee);
    }
    // The local (non-shared) edits dirs should contain only the
    // in-progress segment — nothing was finalized.
    for ( URI editsUri : cluster.getNameEditsDirs(0)) {
      if (editsUri.equals(sharedEditsUri)) {
        continue;
      }
      File editsDir=new File(editsUri.getPath());
      File curDir=new File(editsDir,"current");
      GenericTestUtils.assertGlobEquals(curDir,"edits_.*",NNStorage.getInProgressEditsFileName(1));
    }
  }
  finally {
    // Restore write permission so cleanup can delete the directory.
    if (sharedEditsDir != null) {
      FileUtil.chmod(sharedEditsDir.getAbsolutePath(),"+w",true);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Multiple shared edits directories is an invalid configuration: parsing
 * the namespace edits dirs must fail with a descriptive IOException.
 */
@Test
public void testMultipleSharedDirsFails() throws Exception {
  Configuration conf=new Configuration();
  URI firstShared=new URI("file:///shared-A");
  URI secondShared=new URI("file:///shared-B");
  URI localDir=new URI("file:///local-A");
  // Configure two shared edits dirs, which is unsupported.
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,Joiner.on(",").join(firstShared,secondShared));
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,localDir.toString());
  try {
    FSNamesystem.getNamespaceEditsDirs(conf);
    fail("Allowed multiple shared edits directories");
  }
  catch ( IOException ioe) {
    assertEquals("Multiple shared edits directories are not yet supported",ioe.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Make sure that the shared edits dirs are listed before non-shared dirs
 * when the configuration is parsed. This ensures that the shared journals
 * are synced before the local ones.
 */
@Test
public void testSharedDirsComeFirstInEditsList() throws Exception {
  Configuration conf=new Configuration();
  URI sharedDir=new URI("file:///shared-A");
  URI localA=new URI("file:///local-A");
  URI localB=new URI("file:///local-B");
  URI localC=new URI("file:///local-C");
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,sharedDir.toString());
  // List the local dirs in reverse order to verify only the shared dir is
  // moved to the front — the locals keep their configured order.
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,Joiner.on(",").join(localC,localB,localA));
  List dirs=FSNamesystem.getNamespaceEditsDirs(conf);
  String expectedOrder=Joiner.on(",").join(sharedDir,localC,localB,localA);
  assertEquals("Shared dirs should come first, then local dirs, in the order " + "they were listed in the configuration.",expectedOrder,Joiner.on(",").join(dirs));
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHAAppend

InternalCallVerifier EqualityVerifier 
/**
 * Test to verify the processing of PendingDataNodeMessageQueue in case of
 * append. One block will marked as corrupt if the OP_ADD, OP_UPDATE_BLOCKS
 * comes in one edit log segment and OP_CLOSE edit comes in next log segment
 * which is loaded during failover. Regression test for HDFS-3605.
 */
@Test
public void testMultipleAppendsDuringCatchupTailing() throws Exception {
  Configuration conf=new Configuration();
  // Tail edits infrequently and never auto-roll, so the test controls
  // exactly which edits land in which log segment.
  conf.set(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,"5000");
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,-1);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
  FileSystem fs=null;
  try {
    cluster.transitionToActive(0);
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    Path fileToAppend=new Path("/FileToAppend");
    // Create and hflush, then roll the log and tail it so the OP_ADD /
    // OP_UPDATE_BLOCKS edits land in the segment the standby reads now...
    FSDataOutputStream out=fs.create(fileToAppend);
    out.writeBytes("/data");
    out.hflush();
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
    // ...while OP_CLOSE and the appends land in the next segment, which is
    // only loaded during failover.
    out.close();
    for (int i=0; i < 5; i++) {
      DFSTestUtil.appendFile(fs,fileToAppend,"data");
    }
    cluster.triggerBlockReports();
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    // fsck must pass and no replica may have been marked corrupt.
    int rc=ToolRunner.run(new DFSck(cluster.getConfiguration(1)),new String[]{"/","-files","-blocks"});
    assertEquals(0,rc);
    assertEquals("CorruptBlocks should be empty.",0,cluster.getNameNode(1).getNamesystem().getCorruptReplicaBlocks());
  }
  finally {
    // Fix: close the client before tearing the cluster down; the original
    // shut the cluster down first, so fs.close() could fail against an
    // already-stopped NameNode and mask the real test outcome.
    if (null != fs) {
      fs.close();
    }
    if (null != cluster) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHAConfiguration

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests that the namenode edits dirs and shared edits dirs are gotten with
 * duplicates removed.
 */
@Test
public void testHAUniqueEditDirs() throws IOException {
  Configuration conf=new Configuration();
  // "file://edits/shared/dir" appears both as a regular edits dir and as
  // the shared edits dir; it must only be counted once.
  conf.set(DFS_NAMENODE_EDITS_DIR_KEY,"file://edits/dir, " + "file://edits/shared/dir");
  conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,"file://edits/shared/dir");
  Collection uniqueDirs=FSNamesystem.getNamespaceEditsDirs(conf);
  assertEquals(2,uniqueDirs.size());
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that a StandbyCheckpointer configured as nn1 of nameservice
 * "ns1" resolves the *other* NameNode (1.2.3.2) as its checkpoint target,
 * on the default NN HTTP port.
 */
@Test
public void testGetOtherNNHttpAddress() throws IOException {
  Configuration conf=getHAConf("ns1","1.2.3.1","1.2.3.2");
  conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID,"ns1");
  // Tell this node it is nn1 so the checkpointer must target nn2.
  NameNode.initializeGenericKeys(conf,"ns1","nn1");
  StandbyCheckpointer checkpointer=new StandbyCheckpointer(conf,fsn);
  URL expectedTarget=new URL("http","1.2.3.2",DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,"");
  assertEquals(expectedTarget,checkpointer.getActiveNNAddress());
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHAMetrics

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the HA metrics exposed by FSNamesystem — HA state string,
 * millis since last loaded edits, pending DN message count — across a
 * full cycle of active/standby transitions.
 */
@Test
public void testHAMetrics() throws Exception {
  Configuration conf=new Configuration();
  // Tail edits aggressively but never auto-roll, so edits are loaded only
  // when the test forces it.
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY,Integer.MAX_VALUE);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
  FileSystem fs=null;
  try {
    cluster.waitActive();
    FSNamesystem nn0=cluster.getNamesystem(0);
    FSNamesystem nn1=cluster.getNamesystem(1);
    // Both NNs start out standby. Fix: expected value goes first in
    // assertEquals — the original swapped actual/expected on these two
    // asserts, inconsistent with every later assert in this method.
    assertEquals("standby",nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby",nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    // The active NN reports 0 millis-since-last-loaded-edits; the standby
    // reports a positive value.
    cluster.transitionToActive(0);
    assertEquals("active",nn0.getHAState());
    assertEquals(0,nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby",nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertEquals("standby",nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("active",nn1.getHAState());
    assertEquals(0,nn1.getMillisSinceLastLoadedEdits());
    Thread.sleep(2000);
    assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());
    assertEquals(0,nn0.getPendingDataNodeMessageCount());
    assertEquals(0,nn1.getPendingDataNodeMessageCount());
    // Writing through the active queues DN messages on the standby (nn0).
    fs=HATestUtil.configureFailoverFs(cluster,conf);
    DFSTestUtil.createFile(fs,new Path("/foo"),10,(short)1,1L);
    assertTrue(0 < nn0.getPendingDataNodeMessageCount());
    assertEquals(0,nn1.getPendingDataNodeMessageCount());
    long millisSinceLastLoadedEdits=nn0.getMillisSinceLastLoadedEdits();
    // Catching up on edits drains the pending messages and resets the
    // standby's millis-since-last-loaded-edits clock.
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1),cluster.getNameNode(0));
    assertEquals(0,nn0.getPendingDataNodeMessageCount());
    assertEquals(0,nn1.getPendingDataNodeMessageCount());
    long newMillisSinceLastLoadedEdits=nn0.getMillisSinceLastLoadedEdits();
    assertTrue("expected " + millisSinceLastLoadedEdits + " > "+ newMillisSinceLastLoadedEdits,millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
  }
  finally {
    IOUtils.cleanup(LOG,fs);
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHASafeMode

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Make sure that when we transition to active in safe mode that we don't
 * prematurely consider blocks missing just because not all DNs have reported
 * yet. This is a regression test for HDFS-3921.
 */
@Test
public void testNoPopulatingReplQueuesWhenStartingActiveInSafeMode() throws IOException {
  // Create a multi-block file, then take one DN down and restart NN0 so it
  // comes up in safe mode with incomplete block reports.
  Path testFile=new Path("/test");
  DFSTestUtil.createFile(fs,testFile,15 * BLOCK_SIZE,(short)3,1L);
  cluster.stopDataNode(1);
  cluster.restartNameNode(0,false);
  cluster.transitionToActive(0);
  // The restarted NN is still in safe mode, yet it must not have flagged
  // any blocks as missing.
  NameNode restartedNn=cluster.getNameNode(0);
  assertTrue(restartedNn.isInSafeMode());
  assertEquals(0,cluster.getNamesystem(0).getMissingBlocksCount());
}

InternalCallVerifier EqualityVerifier 
/**
 * Regression test for HDFS-2804: standby should not populate replication
 * queues when exiting safe mode.
 */
@Test
public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
  DFSTestUtil.createFile(fs,new Path("/test"),15 * BLOCK_SIZE,(short)3,1L);
  HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
  // Checkpoint on the standby (saveNamespace requires safe mode) before a
  // second file is created, so its restart image predates /test2.
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false);
  NameNodeAdapter.saveNamespace(nn1);
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE,false);
  DFSTestUtil.createFile(fs,new Path("/test2"),15 * BLOCK_SIZE,(short)3,1L);
  nn0.getRpcServer().rollEditLog();
  // Lose one DN and restart the standby: it will pass through safe mode
  // with incomplete block reports before exiting.
  cluster.stopDataNode(1);
  cluster.shutdownNameNode(1);
  cluster.restartNameNode(1,false);
  nn1=cluster.getNameNode(1);
  // Poll until the restarted standby has left safe mode (up to 10s).
  GenericTestUtils.waitFor(new Supplier(){
    @Override public Boolean get(){
      return !nn1.isInSafeMode();
    }
  },100,10000);
  // Exiting safe mode as a *standby* must not schedule any replication.
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  assertEquals(0L,nn1.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0L,nn1.getNamesystem().getPendingReplicationBlocks());
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestHAStateTransitions

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for HDFS-2812. Since lease renewals go from the client
 * only to the active NN, the SBN will have out-of-date lease
 * info when it becomes active. We need to make sure we don't
 * accidentally mark the leases as expired when the failover
 * proceeds.
 */
@Test(timeout=120000)
public void testLeasesRenewedOnTransition() throws Exception {
  Configuration conf=new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1);
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
  FSDataOutputStream stm=null;
  FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf);
  NameNode nn0=cluster.getNameNode(0);
  NameNode nn1=cluster.getNameNode(1);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    LOG.info("Starting with NN 0 active");
    // Opening the file creates a lease on the active NN only.
    stm=fs.create(TEST_FILE_PATH);
    long nn0t0=NameNodeAdapter.getLeaseRenewalTime(nn0,TEST_FILE_STR);
    assertTrue(nn0t0 > 0);
    long nn1t0=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
    assertEquals("Lease should not yet exist on nn1",-1,nn1t0);
    // Sleep briefly so timestamps can differ, then let the standby catch
    // up: it should learn of the lease with a renewal time after nn0's.
    Thread.sleep(5);
    HATestUtil.waitForStandbyToCatchUp(nn0,nn1);
    long nn1t1=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
    assertTrue("Lease should have been created on standby. Time was: " + nn1t1,nn1t1 > nn0t0);
    Thread.sleep(5);
    LOG.info("Failing over to NN 1");
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    // Becoming active must renew — not expire — the existing lease.
    long nn1t2=NameNodeAdapter.getLeaseRenewalTime(nn1,TEST_FILE_STR);
    assertTrue("Lease should have been renewed by failover process",nn1t2 > nn1t1);
  }
  finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * This test also serves to test
 * {@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration,String)}
 * and {@link DFSUtil#getRpcAddressesForNameserviceId(Configuration,String,String)}
 * by virtue of the fact that it wouldn't work properly if the proxies
 * returned were not for the correct NNs.
 */
@Test
public void testIsAtLeastOneActive() throws Exception {
  MiniDFSCluster cluster=new MiniDFSCluster.Builder(new HdfsConfiguration()).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
  try {
    Configuration clientConf=new HdfsConfiguration();
    HATestUtil.setFailoverConfigurations(cluster,clientConf);
    List proxies=HAUtil.getProxiesForAllNameNodesInNameservice(clientConf,HATestUtil.getLogicalHostname(cluster));
    assertEquals(2,proxies.size());
    // Walk the cluster through every active/standby combination and check
    // the predicate at each step.
    assertFalse(HAUtil.isAtLeastOneActive(proxies));
    cluster.transitionToActive(0);
    assertTrue(HAUtil.isAtLeastOneActive(proxies));
    cluster.transitionToStandby(0);
    assertFalse(HAUtil.isAtLeastOneActive(proxies));
    cluster.transitionToActive(1);
    assertTrue(HAUtil.isAtLeastOneActive(proxies));
    cluster.transitionToStandby(1);
    assertFalse(HAUtil.isAtLeastOneActive(proxies));
  }
  finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestPendingCorruptDnMessages

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Changes the gen-stamp of a block behind the NN's back so that when the DN
 * re-registers, the standby NN queues a pending DataNode message (count
 * reaches 1). After wiping and restarting the DN — which gives it a new
 * storage ID, polled for until {@code getRegisteredDatanodeUid} changes —
 * the pending message count must drop back to 0, and failover to NN1 must
 * still succeed.
 *
 * NOTE(review): the System.out.println polling trace looks like leftover
 * debug output; consider routing it through the test's logger.
 */
@Test public void testChangedStorageId() throws IOException, URISyntaxException, InterruptedException { HdfsConfiguration conf=new HdfsConfiguration(); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(1).nnTopology(MiniDFSNNTopology.simpleHATopology()).build(); try { cluster.transitionToActive(0); FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); OutputStream out=fs.create(filePath); out.write("foo bar baz".getBytes()); out.close(); HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),cluster.getNameNode(1)); ExtendedBlock block=DFSTestUtil.getFirstBlock(fs,filePath); assertTrue(MiniDFSCluster.changeGenStampOfBlock(0,block,900)); DataNodeProperties dnProps=cluster.stopDataNode(0); cluster.restartNameNode(1,false); assertTrue(cluster.restartDataNode(dnProps,true)); while (cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount() < 1) { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); } assertEquals(1,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount()); String oldStorageId=getRegisteredDatanodeUid(cluster,1); assertTrue(wipeAndRestartDn(cluster,0)); String newStorageId=""; do { ThreadUtil.sleepAtLeastIgnoreInterrupts(1000); newStorageId=getRegisteredDatanodeUid(cluster,1); System.out.println("====> oldStorageId: " + oldStorageId + " newStorageId: "+ newStorageId); } while (newStorageId.equals(oldStorageId)); assertEquals(0,cluster.getNamesystem(1).getBlockManager().getPendingDataNodeMessageCount()); cluster.transitionToStandby(0); cluster.transitionToActive(1); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestQuotasWithHA

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Test that quotas are properly tracked by the standby through create,
 * append, and delete.
 *
 * After each mutation on the active, the test waits for the standby (nn1) to
 * catch up on edits and then queries nn1's RPC server for a ContentSummary,
 * checking quota, space quota, consumed space, directory count, and file
 * count. Expected consumed space is 3.5 blocks after create, grows by 1.5
 * blocks after append, and returns to 0 after delete.
 */
@Test(timeout=60000) public void testQuotasTrackedOnStandby() throws Exception { fs.mkdirs(TEST_DIR); DistributedFileSystem dfs=(DistributedFileSystem)fs; dfs.setQuota(TEST_DIR,NS_QUOTA,DS_QUOTA); long expectedSize=3 * BLOCK_SIZE + BLOCK_SIZE / 2; DFSTestUtil.createFile(fs,TEST_FILE,expectedSize,(short)1,1L); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); ContentSummary cs=nn1.getRpcServer().getContentSummary(TEST_DIR_STR); assertEquals(NS_QUOTA,cs.getQuota()); assertEquals(DS_QUOTA,cs.getSpaceQuota()); assertEquals(expectedSize,cs.getSpaceConsumed()); assertEquals(1,cs.getDirectoryCount()); assertEquals(1,cs.getFileCount()); FSDataOutputStream stm=fs.append(TEST_FILE); try { byte[] data=new byte[(int)(BLOCK_SIZE * 3 / 2)]; stm.write(data); expectedSize+=data.length; } finally { IOUtils.closeStream(stm); } HATestUtil.waitForStandbyToCatchUp(nn0,nn1); cs=nn1.getRpcServer().getContentSummary(TEST_DIR_STR); assertEquals(NS_QUOTA,cs.getQuota()); assertEquals(DS_QUOTA,cs.getSpaceQuota()); assertEquals(expectedSize,cs.getSpaceConsumed()); assertEquals(1,cs.getDirectoryCount()); assertEquals(1,cs.getFileCount()); fs.delete(TEST_FILE,true); expectedSize=0; HATestUtil.waitForStandbyToCatchUp(nn0,nn1); cs=nn1.getRpcServer().getContentSummary(TEST_DIR_STR); assertEquals(NS_QUOTA,cs.getQuota()); assertEquals(DS_QUOTA,cs.getSpaceQuota()); assertEquals(expectedSize,cs.getSpaceConsumed()); assertEquals(1,cs.getDirectoryCount()); assertEquals(0,cs.getFileCount()); }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestRetryCacheWithHA

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * 1. Run a set of operations
 * 2. Trigger the NN failover
 * 3. Check the retry cache on the original standby NN
 *
 * The set of operations driven by DFSTestUtil.runOperations is expected to
 * leave exactly 23 retry-cache entries on the active NN; after rolling the
 * edit log, tailing it on the standby, and failing over, the new active must
 * contain the same 23 entries (each old entry is looked up in the new set).
 */
@Test(timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception { DFSTestUtil.runOperations(cluster,dfs,conf,BlockSize,0); FSNamesystem fsn0=cluster.getNamesystem(0); LightWeightCache cacheSet=(LightWeightCache)fsn0.getRetryCache().getCacheSet(); assertEquals(23,cacheSet.size()); Map oldEntries=new HashMap(); Iterator iter=cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry=iter.next(); oldEntries.put(entry,entry); } cluster.getNameNode(0).getRpcServer().rollEditLog(); cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits(); cluster.shutdownNameNode(0); cluster.transitionToActive(1); FSNamesystem fsn1=cluster.getNamesystem(1); cacheSet=(LightWeightCache)fsn1.getRetryCache().getCacheSet(); assertEquals(23,cacheSet.size()); iter=cacheSet.iterator(); while (iter.hasNext()) { CacheEntry entry=iter.next(); assertTrue(oldEntries.containsKey(entry)); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyBlockManagement

InternalCallVerifier EqualityVerifier 
/**
 * Writes then deletes a file on the active NN (nn1 here is index 0), rolling
 * the edit log so the standby tails the changes, and asserts that the
 * standby's BlockManager never accumulates pending block deletions — the
 * count stays 0 both before and after heartbeats/block reports are
 * triggered across the cluster.
 */
@Test(timeout=60000) public void testInvalidateBlock() throws Exception { Configuration conf=new Configuration(); HAUtil.setAllowStandbyReads(conf,true); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build(); try { cluster.waitActive(); cluster.transitionToActive(0); NameNode nn1=cluster.getNameNode(0); NameNode nn2=cluster.getNameNode(1); FileSystem fs=HATestUtil.configureFailoverFs(cluster,conf); Thread.sleep(1000); LOG.info("=================================="); DFSTestUtil.writeFile(fs,TEST_FILE_PATH,TEST_FILE_DATA); nn1.getRpcServer().rollEditLog(); LOG.info("=================================="); fs.delete(TEST_FILE_PATH,false); BlockManagerTestUtil.computeAllPendingWork(nn1.getNamesystem().getBlockManager()); nn1.getRpcServer().rollEditLog(); assertEquals(0,nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount()); cluster.triggerHeartbeats(); cluster.triggerBlockReports(); assertEquals(0,nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount()); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints

EqualityVerifier 
/**
 * After edits are tailed by the standby and a checkpoint at txid 12 lands on
 * both NNs, the OIV image directory must contain exactly one file, and the
 * standby's JournalSet (spied via Mockito) must never have purged edit logs
 * itself.
 */
@Test
public void testSBNCheckpoints() throws Exception {
  JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1);
  doEdits(0, 10);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  // The standby checkpoints at txid 12 ...
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
  // ... and eventually writes an OIV legacy image file.
  GenericTestUtils.waitFor(new Supplier() {
    @Override
    public Boolean get() {
      // Simplified from `if (cond) { return true; } else { return false; }`.
      return tmpOivImgDir.list().length > 0;
    }
  }, 1000, 60000);
  assertEquals("One file is expected", 1, tmpOivImgDir.list().length);
  // The checkpoint is also uploaded back to nn0.
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
  // The standby must never purge edit logs on its own.
  Mockito.verify(standbyJournalSet, Mockito.never())
      .purgeLogsOlderThan(Mockito.anyLong());
}

EqualityVerifier 
/**
 * Test for the case when both of the NNs in the cluster are in the standby
 * state, and thus are both creating checkpoints and uploading them to each
 * other. In this circumstance, they should receive the error from the other
 * node indicating that the other node already has a checkpoint for the given
 * txid, but this should not cause an abort, etc.
 *
 * Both NNs must end up with txid 12 as their most recent checkpoint, and the
 * current-directory contents of both NNs must be byte-identical.
 */
@Test public void testBothNodesInStandbyState() throws Exception { doEdits(0,10); cluster.transitionToStandby(0); HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(12)); HATestUtil.waitForCheckpoint(cluster,0,ImmutableList.of(12)); assertEquals(12,nn0.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); assertEquals(12,nn1.getNamesystem().getFSImage().getMostRecentCheckpointTxId()); List dirs=Lists.newArrayList(); dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster,0)); dirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster,1)); FSImageTestUtil.assertParallelFilesAreIdentical(dirs,ImmutableSet.of()); }

InternalCallVerifier EqualityVerifier 
/**
 * Test cancellation of ongoing checkpoints when failover happens
 * mid-checkpoint during image upload from standby to active NN.
 *
 * The image transfer rate on nn1 is throttled to 100 bytes/s so the upload
 * of the txid-104 checkpoint is still in flight when the test fails over to
 * nn1 and shuts the cluster down. It then waits (via a thread dump scan) for
 * every "TransferFsImageUpload" thread to exit and asserts nn0 never
 * recorded a checkpoint (most recent checkpoint txid stays 0).
 */
@Test(timeout=60000) public void testCheckpointCancellationDuringUpload() throws Exception { cluster.getConfiguration(0).setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false); cluster.getConfiguration(1).setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,false); cluster.getConfiguration(1).setLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY,100); cluster.restartNameNode(0); cluster.restartNameNode(1); nn0=cluster.getNameNode(0); nn1=cluster.getNameNode(1); cluster.transitionToActive(0); doEdits(0,100); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); HATestUtil.waitForCheckpoint(cluster,1,ImmutableList.of(104)); cluster.transitionToStandby(0); cluster.transitionToActive(1); cluster.shutdown(); cluster=null; GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ ThreadMXBean threadBean=ManagementFactory.getThreadMXBean(); ThreadInfo[] threads=threadBean.getThreadInfo(threadBean.getAllThreadIds(),1); for ( ThreadInfo thread : threads) { if (thread.getThreadName().startsWith("TransferFsImageUpload")) { return false; } } return true; } } ,1000,30000); assertEquals(0,nn0.getFSImage().getMostRecentCheckpointTxId()); }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyIsHot

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Regression test for HDFS-2795:
 * - Start an HA cluster with a DN.
 * - Write several blocks to the FS with replication 1.
 * - Shutdown the DN.
 * - Wait for the NNs to declare the DN dead. All blocks will be
 *   under-replicated.
 * - Restart the DN.
 * In the bug, the standby node would only very slowly notice the blocks
 * returning to the cluster.
 *
 * After the restart both NNs must report 0 under-replicated blocks and the
 * standby's block locations for the test file must show 1 replica again.
 */
@Test(timeout=60000) public void testDatanodeRestarts() throws Exception { Configuration conf=new Configuration(); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,1024); HAUtil.setAllowStandbyReads(conf,true); conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,0); conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY,1); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build(); try { NameNode nn0=cluster.getNameNode(0); NameNode nn1=cluster.getNameNode(1); cluster.transitionToActive(0); DFSTestUtil.createFile(cluster.getFileSystem(0),TEST_FILE_PATH,5 * 1024,(short)1,1L); HATestUtil.waitForStandbyToCatchUp(nn0,nn1); DataNode dn=cluster.getDataNodes().get(0); String dnName=dn.getDatanodeId().getXferAddr(); DataNodeProperties dnProps=cluster.stopDataNode(0); BlockManagerTestUtil.noticeDeadDatanode(nn0,dnName); BlockManagerTestUtil.noticeDeadDatanode(nn1,dnName); BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager()); BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager()); assertEquals(5,nn0.getNamesystem().getUnderReplicatedBlocks()); assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks()); LocatedBlocks locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1); assertEquals("Standby should have registered that the block has no replicas",0,locs.get(0).getLocations().length); cluster.restartDataNode(dnProps); cluster.waitActive(0); cluster.waitActive(1); BlockManagerTestUtil.updateState(nn0.getNamesystem().getBlockManager()); BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager()); 
assertEquals(0,nn0.getNamesystem().getUnderReplicatedBlocks()); assertEquals(0,nn1.getNamesystem().getUnderReplicatedBlocks()); locs=nn1.getRpcServer().getBlockLocations(TEST_FILE,0,1); assertEquals("Standby should have registered that the block has replicas again",1,locs.get(0).getLocations().length); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.server.namenode.ha.TestXAttrsWithHA

InternalCallVerifier EqualityVerifier 
/**
 * Test that xattrs are properly tracked by the standby: after it catches up
 * on edits it must report both xattrs via its RPC server, and after failover
 * the new active must serve the same two name/value pairs.
 */
@Test(timeout=60000)
public void testXAttrsTrackedOnStandby() throws Exception {
  fs.create(path).close();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  List xAttrs = nn1.getRpcServer().getXAttrs("/file", null);
  assertEquals(2, xAttrs.size());
  // Fail over to nn1. The original code called shutdownNameNode(0) twice in
  // a row; the duplicate call was redundant and has been removed.
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  Map xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 2);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  fs.delete(path, true);
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestAclWithSnapshot

InternalCallVerifier EqualityVerifier 
/**
 * After a snapshot is taken, removing the ACL from the live file and subdir
 * must not change what the snapshot paths report: the snapshot copies keep
 * the original ACL (bruce READ_EXECUTE, group NONE), permission 010550, and
 * the original grant/deny behavior for bruce and diana.
 *
 * The post-removal assertions (doSnapshotContentsRemovalAssertions) are
 * re-run after restart(false) and restart(true) to verify the state
 * survives a NameNode restart in both helper modes.
 * NOTE(review): the restart(boolean) flag's meaning is defined elsewhere in
 * this class — presumably whether to checkpoint before restarting; confirm
 * against the helper.
 */
@Test public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval() throws Exception { Path filePath=new Path(path,"file1"); Path subdirPath=new Path(path,"subdir1"); Path fileSnapshotPath=new Path(snapshotPath,"file1"); Path subdirSnapshotPath=new Path(snapshotPath,"subdir1"); FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0777)); FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close(); FileSystem.mkdirs(hdfs,subdirPath,FsPermission.createImmutable((short)0700)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE)); hdfs.setAcl(filePath,aclSpec); hdfs.setAcl(subdirPath,aclSpec); assertFilePermissionGranted(fsAsBruce,BRUCE,filePath); assertFilePermissionDenied(fsAsDiana,DIANA,filePath); assertDirPermissionGranted(fsAsBruce,BRUCE,subdirPath); assertDirPermissionDenied(fsAsDiana,DIANA,subdirPath); SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName); AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)}; AclStatus s=hdfs.getAclStatus(filePath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,filePath); s=hdfs.getAclStatus(subdirPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,subdirPath); s=hdfs.getAclStatus(fileSnapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,fileSnapshotPath); assertFilePermissionGranted(fsAsBruce,BRUCE,fileSnapshotPath); assertFilePermissionDenied(fsAsDiana,DIANA,fileSnapshotPath); s=hdfs.getAclStatus(subdirSnapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,subdirSnapshotPath); 
assertDirPermissionGranted(fsAsBruce,BRUCE,subdirSnapshotPath); assertDirPermissionDenied(fsAsDiana,DIANA,subdirSnapshotPath); hdfs.removeAcl(filePath); hdfs.removeAcl(subdirPath); doSnapshotContentsRemovalAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath); restart(false); doSnapshotContentsRemovalAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath); restart(true); doSnapshotContentsRemovalAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath); }

InternalCallVerifier EqualityVerifier 
/**
 * With the directory at its namespace quota (3), modifying a file's ACL —
 * which requires a snapshot diff record — must throw
 * NSQuotaExceededException (expected via the JUnit ExpectedException rule),
 * and the pre-change ACL (bruce READ_WRITE, group NONE) and permission
 * 010660 must be intact on both the live file and its snapshot copy.
 */
@Test
public void testChangeAclExceedsQuota() throws Exception {
  Path filePath = new Path(path, "file1");
  Path fileSnapshotPath = new Path(snapshotPath, "file1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
  hdfs.allowSnapshot(path);
  hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600)).close();
  hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
  List aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ_WRITE));
  hdfs.modifyAclEntries(filePath, aclSpec);
  hdfs.createSnapshot(path, snapshotName);
  AclStatus s = hdfs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, NONE)}, returned);
  assertPermission((short)010660, filePath);
  s = hdfs.getAclStatus(fileSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, NONE)}, returned);
  // Fixed: assert the snapshot copy's permission. The original re-asserted
  // filePath here and never checked fileSnapshotPath.
  assertPermission((short)010660, fileSnapshotPath);
  aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ));
  exception.expect(NSQuotaExceededException.class);
  hdfs.modifyAclEntries(filePath, aclSpec);
}

InternalCallVerifier EqualityVerifier 
/**
 * After snapshotting the directory, removing its ACL must leave the snapshot
 * root unchanged: the snapshot keeps the original ACL (bruce READ_EXECUTE,
 * group NONE), permission 010750, and the original grant (bruce) / deny
 * (diana) behavior. doSnapshotRootRemovalAssertions re-checks the state,
 * including after restart(false) and restart(true).
 */
@Test public void testOriginalAclEnforcedForSnapshotRootAfterRemoval() throws Exception { FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE)); hdfs.setAcl(path,aclSpec); assertDirPermissionGranted(fsAsBruce,BRUCE,path); assertDirPermissionDenied(fsAsDiana,DIANA,path); SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName); AclStatus s=hdfs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned); assertPermission((short)010750,path); s=hdfs.getAclStatus(snapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned); assertPermission((short)010750,snapshotPath); assertDirPermissionGranted(fsAsBruce,BRUCE,snapshotPath); assertDirPermissionDenied(fsAsDiana,DIANA,snapshotPath); hdfs.removeAcl(path); doSnapshotRootRemovalAssertions(path,snapshotPath); restart(false); doSnapshotRootRemovalAssertions(path,snapshotPath); restart(true); doSnapshotRootRemovalAssertions(path,snapshotPath); }

InternalCallVerifier EqualityVerifier 
/**
 * removeAcl must act on the directory's current state rather than any state
 * captured by an earlier snapshot: after an ACL entry is added and then the
 * ACL removed, the directory reports no entries, permission 0700, and both
 * test users are denied access.
 */
@Test
public void testRemoveReadsCurrentState() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  // Grant bruce full access post-snapshot, then strip the ACL entirely.
  List bruceAllSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", ALL));
  hdfs.modifyAclEntries(path, bruceAllSpec);
  hdfs.removeAcl(path);
  AclStatus status = hdfs.getAclStatus(path);
  AclEntry[] actualEntries = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{}, actualEntries);
  assertPermission((short)0700, path);
  assertDirPermissionDenied(fsAsBruce, BRUCE, path);
  assertDirPermissionDenied(fsAsDiana, DIANA, path);
}

InternalCallVerifier EqualityVerifier 
/**
 * After snapshotting the directory, replacing its ACL (swapping bruce's
 * entry for diana's) must not affect the snapshot root: the snapshot keeps
 * the original ACL (bruce READ_EXECUTE, group NONE), permission 010750, and
 * the original grant/deny behavior. doSnapshotRootChangeAssertions re-checks
 * the state, including after restart(false) and restart(true).
 */
@Test public void testOriginalAclEnforcedForSnapshotRootAfterChange() throws Exception { FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE)); hdfs.setAcl(path,aclSpec); assertDirPermissionGranted(fsAsBruce,BRUCE,path); assertDirPermissionDenied(fsAsDiana,DIANA,path); SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName); AclStatus s=hdfs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned); assertPermission((short)010750,path); s=hdfs.getAclStatus(snapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)},returned); assertPermission((short)010750,snapshotPath); assertDirPermissionGranted(fsAsBruce,BRUCE,snapshotPath); assertDirPermissionDenied(fsAsDiana,DIANA,snapshotPath); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"diana",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE)); hdfs.setAcl(path,aclSpec); doSnapshotRootChangeAssertions(path,snapshotPath); restart(false); doSnapshotRootChangeAssertions(path,snapshotPath); restart(true); doSnapshotRootChangeAssertions(path,snapshotPath); }

InternalCallVerifier EqualityVerifier 
/**
 * A DEFAULT ACL entry on a directory must appear only as DEFAULT entries —
 * never copied into the access ACL — on both the directory and a snapshot
 * taken afterwards. Both report the same five DEFAULT entries, permission
 * 010700, and bruce (who holds only a DEFAULT entry) is still denied access
 * to the snapshot path.
 */
@Test public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot() throws Exception { FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700)); List aclSpec=Lists.newArrayList(aclEntry(DEFAULT,USER,"bruce",READ_EXECUTE)); hdfs.modifyAclEntries(path,aclSpec); SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName); AclStatus s=hdfs.getAclStatus(path); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",READ_EXECUTE),aclEntry(DEFAULT,GROUP,NONE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010700,path); s=hdfs.getAclStatus(snapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(new AclEntry[]{aclEntry(DEFAULT,USER,ALL),aclEntry(DEFAULT,USER,"bruce",READ_EXECUTE),aclEntry(DEFAULT,GROUP,NONE),aclEntry(DEFAULT,MASK,READ_EXECUTE),aclEntry(DEFAULT,OTHER,NONE)},returned); assertPermission((short)010700,snapshotPath); assertDirPermissionDenied(fsAsBruce,BRUCE,snapshotPath); }

InternalCallVerifier EqualityVerifier 
/**
 * modifyAclEntries must build on the directory's current state, not the
 * snapshotted state: two successive modifications (bruce ALL, then diana
 * READ_EXECUTE) accumulate, yielding three access entries, permission
 * 010770, and granted access for both users.
 */
@Test
public void testModifyReadsCurrentState() throws Exception {
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  // Two separate post-snapshot ACL modifications; both must take effect.
  hdfs.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", ALL)));
  hdfs.modifyAclEntries(path,
      Lists.newArrayList(aclEntry(ACCESS, USER, "diana", READ_EXECUTE)));
  AclEntry[] expectedEntries = new AclEntry[]{
      aclEntry(ACCESS, USER, "bruce", ALL),
      aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
      aclEntry(ACCESS, GROUP, NONE)};
  AclStatus status = hdfs.getAclStatus(path);
  AclEntry[] actualEntries = status.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expectedEntries, actualEntries);
  assertPermission((short)010770, path);
  assertDirPermissionGranted(fsAsBruce, BRUCE, path);
  assertDirPermissionGranted(fsAsDiana, DIANA, path);
}

InternalCallVerifier EqualityVerifier 
/**
 * With the directory at its namespace quota (3), removing a file's ACL —
 * which requires a snapshot diff record — must throw
 * NSQuotaExceededException (expected via the JUnit ExpectedException rule),
 * and the pre-removal ACL (bruce READ_WRITE, group NONE) and permission
 * 010660 must be intact on both the live file and its snapshot copy.
 */
@Test
public void testRemoveAclExceedsQuota() throws Exception {
  Path filePath = new Path(path, "file1");
  Path fileSnapshotPath = new Path(snapshotPath, "file1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
  hdfs.allowSnapshot(path);
  hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600)).close();
  hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
  List aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ_WRITE));
  hdfs.modifyAclEntries(filePath, aclSpec);
  hdfs.createSnapshot(path, snapshotName);
  AclStatus s = hdfs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, NONE)}, returned);
  assertPermission((short)010660, filePath);
  s = hdfs.getAclStatus(fileSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{aclEntry(ACCESS, USER, "bruce", READ_WRITE),
      aclEntry(ACCESS, GROUP, NONE)}, returned);
  // Fixed: assert the snapshot copy's permission. The original re-asserted
  // filePath here and never checked fileSnapshotPath.
  assertPermission((short)010660, fileSnapshotPath);
  aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER, "bruce", READ));
  exception.expect(NSQuotaExceededException.class);
  hdfs.removeAcl(filePath);
}

InternalCallVerifier EqualityVerifier 
/**
 * getAclStatus on the ".snapshot" pseudo-directory itself must succeed and
 * report an empty ACL entry list.
 */
@Test
public void testGetAclStatusDotSnapshotPath() throws Exception {
  hdfs.mkdirs(path);
  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
  Path dotSnapshotPath = new Path(path, ".snapshot");
  AclEntry[] entries =
      hdfs.getAclStatus(dotSnapshotPath).getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[]{}, entries);
}

InternalCallVerifier EqualityVerifier 
/**
 * After a snapshot is taken, replacing the ACL on the live file and subdir
 * (swapping bruce's entry for diana ALL) must not change what the snapshot
 * paths report: the snapshot copies keep the original ACL (bruce
 * READ_EXECUTE, group NONE), permission 010550, and the original grant/deny
 * behavior for bruce and diana.
 *
 * doSnapshotContentsChangeAssertions re-checks the post-change state,
 * including after restart(false) and restart(true).
 */
@Test public void testOriginalAclEnforcedForSnapshotContentsAfterChange() throws Exception { Path filePath=new Path(path,"file1"); Path subdirPath=new Path(path,"subdir1"); Path fileSnapshotPath=new Path(snapshotPath,"file1"); Path subdirSnapshotPath=new Path(snapshotPath,"subdir1"); FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0777)); FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close(); FileSystem.mkdirs(hdfs,subdirPath,FsPermission.createImmutable((short)0700)); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE)); hdfs.setAcl(filePath,aclSpec); hdfs.setAcl(subdirPath,aclSpec); assertFilePermissionGranted(fsAsBruce,BRUCE,filePath); assertFilePermissionDenied(fsAsDiana,DIANA,filePath); assertDirPermissionGranted(fsAsBruce,BRUCE,subdirPath); assertDirPermissionDenied(fsAsDiana,DIANA,subdirPath); SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName); AclEntry[] expected=new AclEntry[]{aclEntry(ACCESS,USER,"bruce",READ_EXECUTE),aclEntry(ACCESS,GROUP,NONE)}; AclStatus s=hdfs.getAclStatus(filePath); AclEntry[] returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,filePath); s=hdfs.getAclStatus(subdirPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,subdirPath); s=hdfs.getAclStatus(fileSnapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,fileSnapshotPath); assertFilePermissionGranted(fsAsBruce,BRUCE,fileSnapshotPath); assertFilePermissionDenied(fsAsDiana,DIANA,fileSnapshotPath); s=hdfs.getAclStatus(subdirSnapshotPath); returned=s.getEntries().toArray(new AclEntry[0]); assertArrayEquals(expected,returned); assertPermission((short)010550,subdirSnapshotPath); 
assertDirPermissionGranted(fsAsBruce,BRUCE,subdirSnapshotPath); assertDirPermissionDenied(fsAsDiana,DIANA,subdirSnapshotPath); aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,READ_EXECUTE),aclEntry(ACCESS,USER,"diana",ALL),aclEntry(ACCESS,GROUP,NONE),aclEntry(ACCESS,OTHER,NONE)); hdfs.setAcl(filePath,aclSpec); hdfs.setAcl(subdirPath,aclSpec); doSnapshotContentsChangeAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath); restart(false); doSnapshotContentsChangeAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath); restart(true); doSnapshotContentsChangeAssertions(filePath,fileSnapshotPath,subdirPath,subdirSnapshotPath); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestCheckpointsWithSnapshots

InternalCallVerifier EqualityVerifier 
/**
 * Regression test for HDFS-5433 - "When reloading fsimage during
 * checkpointing, we should clear existing snapshottable directories".
 *
 * Tracks snapshot and snapshottable-directory counts on both the NN and the
 * SecondaryNameNode through: allowSnapshot, createSnapshot, a checkpoint
 * (counts propagate to the secondary), deleteSnapshot + disallowSnapshot, a
 * saveNamespace on the NN, and a second checkpoint (secondary's counts must
 * drop back to 0 — the bug left stale snapshottable dirs behind).
 */
@Test public void testCheckpoint() throws IOException { MiniDFSCluster cluster=null; SecondaryNameNode secondary=null; try { cluster=new MiniDFSCluster.Builder(conf).build(); cluster.waitActive(); secondary=new SecondaryNameNode(conf); SnapshotManager nnSnapshotManager=cluster.getNamesystem().getSnapshotManager(); SnapshotManager secondarySnapshotManager=secondary.getFSNamesystem().getSnapshotManager(); FileSystem fs=cluster.getFileSystem(); HdfsAdmin admin=new HdfsAdmin(FileSystem.getDefaultUri(conf),conf); assertEquals(0,nnSnapshotManager.getNumSnapshots()); assertEquals(0,nnSnapshotManager.getNumSnapshottableDirs()); assertEquals(0,secondarySnapshotManager.getNumSnapshots()); assertEquals(0,secondarySnapshotManager.getNumSnapshottableDirs()); fs.mkdirs(TEST_PATH); admin.allowSnapshot(TEST_PATH); assertEquals(0,nnSnapshotManager.getNumSnapshots()); assertEquals(1,nnSnapshotManager.getNumSnapshottableDirs()); Path snapshotPath=fs.createSnapshot(TEST_PATH); assertEquals(1,nnSnapshotManager.getNumSnapshots()); assertEquals(1,nnSnapshotManager.getNumSnapshottableDirs()); secondary.doCheckpoint(); assertEquals(1,secondarySnapshotManager.getNumSnapshots()); assertEquals(1,secondarySnapshotManager.getNumSnapshottableDirs()); fs.deleteSnapshot(TEST_PATH,snapshotPath.getName()); admin.disallowSnapshot(TEST_PATH); assertEquals(0,nnSnapshotManager.getNumSnapshots()); assertEquals(0,nnSnapshotManager.getNumSnapshottableDirs()); NameNodeAdapter.enterSafeMode(cluster.getNameNode(),false); NameNodeAdapter.saveNamespace(cluster.getNameNode()); NameNodeAdapter.leaveSafeMode(cluster.getNameNode()); secondary.doCheckpoint(); assertEquals(0,secondarySnapshotManager.getNumSnapshots()); assertEquals(0,secondarySnapshotManager.getNumSnapshottableDirs()); } finally { if (cluster != null) { cluster.shutdown(); } if (secondary != null) { 
secondary.shutdown(); } } }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestINodeFileUnderConstructionWithSnapshot

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test snapshot during file appending, before the corresponding
 * {@link FSDataOutputStream} instance closes.
 *
 * Three snapshots (s0/s1/s2) are taken while appends are in flight (hsync'd
 * with UPDATE_LENGTH but not yet closed). Each snapshot's recorded file size
 * — read via computeFileSize(snapshotId) from the directory diff — must
 * reflect the synced length at snapshot time (2, 2, then 3 blocks) and must
 * not change when later appends close or replication is reduced.
 */
@Test(timeout=60000) public void testSnapshotWhileAppending() throws Exception { Path file=new Path(dir,"file"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed); HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE); out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); SnapshotTestHelper.createSnapshot(hdfs,dir,"s0"); out.close(); INodeFile fileNode=(INodeFile)fsdir.getINode(file.toString()); assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize()); INodeDirectory dirNode=fsdir.getINode(dir.toString()).asDirectory(); DirectoryDiff last=dirNode.getDiffs().getLast(); out=appendFileWithoutClosing(file,BLOCKSIZE); out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); dirNode=fsdir.getINode(dir.toString()).asDirectory(); assertEquals(BLOCKSIZE * 2,fileNode.computeFileSize(last.getSnapshotId())); hdfs.createSnapshot(dir,"s1"); out.close(); fileNode=(INodeFile)fsdir.getINode(file.toString()); dirNode=fsdir.getINode(dir.toString()).asDirectory(); last=dirNode.getDiffs().getLast(); assertTrue(fileNode.isWithSnapshot()); assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId())); hdfs.setReplication(file,(short)(REPLICATION - 1)); out=appendFileWithoutClosing(file,BLOCKSIZE); hdfs.createSnapshot(dir,"s2"); out.close(); assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize(last.getSnapshotId())); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Call DFSClient#callGetBlockLocations(...) for snapshot files. Make sure
 * only blocks within the size range are returned.
 *
 * Snapshot s1 (1 full block) must report exactly 1 block with file length
 * BLOCKSIZE even after the live file grows. Snapshot s2 (2 blocks - 1 byte)
 * must report 2 complete blocks even while a further append is open, with
 * isUnderConstruction false. The live file, with the append still open,
 * must report 3 blocks, isUnderConstruction true, and an incomplete last
 * block of BLOCKSIZE - 1 bytes starting at offset 2*BLOCKSIZE.
 */
@Test public void testGetBlockLocations() throws Exception { final Path root=new Path("/"); final Path file=new Path("/file"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed); SnapshotTestHelper.createSnapshot(hdfs,root,"s1"); final Path fileInSnapshot=SnapshotTestHelper.getSnapshotPath(root,"s1",file.getName()); FileStatus status=hdfs.getFileStatus(fileInSnapshot); assertEquals(BLOCKSIZE,status.getLen()); DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE - 1); status=hdfs.getFileStatus(fileInSnapshot); assertEquals(BLOCKSIZE,status.getLen()); status=hdfs.getFileStatus(file); assertEquals(BLOCKSIZE * 2 - 1,status.getLen()); LocatedBlocks blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot.toString(),0,Long.MAX_VALUE); List blockList=blocks.getLocatedBlocks(); assertEquals(BLOCKSIZE,blocks.getFileLength()); assertEquals(1,blockList.size()); LocatedBlock lastBlock=blocks.getLastLocatedBlock(); assertEquals(0,lastBlock.getStartOffset()); assertEquals(BLOCKSIZE,lastBlock.getBlockSize()); SnapshotTestHelper.createSnapshot(hdfs,root,"s2"); final Path fileInSnapshot2=SnapshotTestHelper.getSnapshotPath(root,"s2",file.getName()); HdfsDataOutputStream out=appendFileWithoutClosing(file,BLOCKSIZE); out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); status=hdfs.getFileStatus(fileInSnapshot2); assertEquals(BLOCKSIZE * 2 - 1,status.getLen()); status=hdfs.getFileStatus(file); assertEquals(BLOCKSIZE * 3 - 1,status.getLen()); blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),0,Long.MAX_VALUE); assertFalse(blocks.isUnderConstruction()); assertTrue(blocks.isLastBlockComplete()); blockList=blocks.getLocatedBlocks(); assertEquals(BLOCKSIZE * 2 - 1,blocks.getFileLength()); assertEquals(2,blockList.size()); lastBlock=blocks.getLastLocatedBlock(); 
assertEquals(BLOCKSIZE,lastBlock.getStartOffset()); assertEquals(BLOCKSIZE,lastBlock.getBlockSize()); blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),fileInSnapshot2.toString(),BLOCKSIZE,0); blockList=blocks.getLocatedBlocks(); assertEquals(1,blockList.size()); blocks=DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),file.toString(),0,Long.MAX_VALUE); blockList=blocks.getLocatedBlocks(); assertEquals(3,blockList.size()); assertTrue(blocks.isUnderConstruction()); assertFalse(blocks.isLastBlockComplete()); lastBlock=blocks.getLastLocatedBlock(); assertEquals(BLOCKSIZE * 2,lastBlock.getStartOffset()); assertEquals(BLOCKSIZE - 1,lastBlock.getBlockSize()); out.close(); }

InternalCallVerifier EqualityVerifier 
/**
 * Test snapshot after file appending.
 *
 * Creates and repeatedly appends to a file around snapshots s0/s1/s2, also
 * lowering replication after s1. After each append the live INodeFile must
 * reflect the reduced replication (REPLICATION - 1) and the cumulative size
 * (3 then 4 blocks) — i.e. snapshots do not interfere with subsequent
 * appends or replication changes.
 */
@Test(timeout=60000) public void testSnapshotAfterAppending() throws Exception { Path file=new Path(dir,"file"); SnapshotTestHelper.createSnapshot(hdfs,dir,"s0"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed); DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE); INodeFile fileNode=(INodeFile)fsdir.getINode(file.toString()); hdfs.createSnapshot(dir,"s1"); hdfs.setReplication(file,(short)(REPLICATION - 1)); DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE); fileNode=(INodeFile)fsdir.getINode(file.toString()); assertEquals(REPLICATION - 1,fileNode.getFileReplication()); assertEquals(BLOCKSIZE * 3,fileNode.computeFileSize()); hdfs.createSnapshot(dir,"s2"); DFSTestUtil.appendFile(hdfs,file,BLOCKSIZE); fileNode=(INodeFile)fsdir.getINode(file.toString()); assertEquals(REPLICATION - 1,fileNode.getFileReplication()); assertEquals(BLOCKSIZE * 4,fileNode.computeFileSize()); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestNestedSnapshots

APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test the snapshot limit of a single snapshottable directory.
 *
 * Creates SNAPSHOT_LIMIT snapshots (adding a file every `step` snapshots),
 * verifies that one more snapshot fails with an IOException, then randomly
 * samples snapshot/file combinations: file "f&lt;f&gt;" exists in snapshot
 * "s&lt;s&gt;" iff s &gt; f (the file was created before that snapshot).
 * @throws Exception
 */
@Test(timeout=300000) public void testSnapshotLimit() throws Exception { final int step=1000; final String dirStr="/testSnapshotLimit/dir"; final Path dir=new Path(dirStr); hdfs.mkdirs(dir,new FsPermission((short)0777)); hdfs.allowSnapshot(dir); int s=0; for (; s < SNAPSHOT_LIMIT; s++) { final String snapshotName="s" + s; hdfs.createSnapshot(dir,snapshotName); if (s % step == 0) { final Path file=new Path(dirStr,"f" + s); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,SEED); } } try { hdfs.createSnapshot(dir,"s" + s); Assert.fail("Expected to fail to create snapshot, but didn't."); } catch ( IOException ioe) { SnapshotTestHelper.LOG.info("The exception is expected.",ioe); } for (int f=0; f < SNAPSHOT_LIMIT; f+=step) { final String file="f" + f; s=RANDOM.nextInt(step); for (; s < SNAPSHOT_LIMIT; s+=RANDOM.nextInt(step)) { final Path p=SnapshotTestHelper.getSnapshotPath(dir,"s" + s,file); Assert.assertEquals(s > f,hdfs.exists(p)); } } }

UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Exercises the interplay between a namespace quota and snapshots on the
// same directory. With NS_QUOTA=6, after creating foo/f1 and taking one
// auto-named snapshot, the quota is exhausted: the create of f3, the second
// createSnapshot, and setPermission(f1) all fail with
// NSQuotaExceededException (the last surfacing wrapped in a RemoteException).
// Also verifies the auto-generated snapshot name matches the
// s<yyyyMMdd-HHmmss.SSS> pattern and is placed under <dir>/.snapshot.
// NOTE(review): setPermission(f2) is expected to succeed right after
// setPermission(f1) failed — presumably because f1 is captured in the
// earlier snapshot so modifying it records an extra quota-charged diff,
// while f2 is not; confirm against the quota-accounting code.
// Raising the quota by 2 then allows one more snapshot and a permission
// change on foo.
@Test(timeout=300000) public void testSnapshotWithQuota() throws Exception { final String dirStr="/testSnapshotWithQuota/dir"; final Path dir=new Path(dirStr); hdfs.mkdirs(dir,new FsPermission((short)0777)); hdfs.allowSnapshot(dir); final int NS_QUOTA=6; hdfs.setQuota(dir,NS_QUOTA,HdfsConstants.QUOTA_DONT_SET); final Path foo=new Path(dir,"foo"); final Path f1=new Path(foo,"f1"); DFSTestUtil.createFile(hdfs,f1,BLOCKSIZE,REPLICATION,SEED); { final Path snapshotPath=hdfs.createSnapshot(dir); final String snapshotName=snapshotPath.getName(); Assert.assertTrue("snapshotName=" + snapshotName,Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",snapshotName)); final Path parent=snapshotPath.getParent(); Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR,parent.getName()); Assert.assertEquals(dir,parent.getParent()); } final Path f2=new Path(foo,"f2"); DFSTestUtil.createFile(hdfs,f2,BLOCKSIZE,REPLICATION,SEED); try { final Path f3=new Path(foo,"f3"); DFSTestUtil.createFile(hdfs,f3,BLOCKSIZE,REPLICATION,SEED); Assert.fail(); } catch ( NSQuotaExceededException e) { SnapshotTestHelper.LOG.info("The exception is expected.",e); } try { hdfs.createSnapshot(dir); Assert.fail(); } catch ( NSQuotaExceededException e) { SnapshotTestHelper.LOG.info("The exception is expected.",e); } try { hdfs.setPermission(f1,new FsPermission((short)0)); Assert.fail(); } catch ( RemoteException e) { Assert.assertSame(NSQuotaExceededException.class,e.unwrapRemoteException().getClass()); SnapshotTestHelper.LOG.info("The exception is expected.",e); } hdfs.setPermission(f2,new FsPermission((short)0)); hdfs.setQuota(dir,NS_QUOTA + 2,HdfsConstants.QUOTA_DONT_SET); hdfs.createSnapshot(dir,"s1"); hdfs.setPermission(foo,new FsPermission((short)0444)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test {@link Snapshot#ID_COMPARATOR}. */
@Test(timeout = 300000)
public void testIdCmp() {
  final PermissionStatus perm = PermissionStatus.createImmutable(
      "user", "group", FsPermission.createImmutable((short) 0));
  final INodeDirectory snapshottable = new INodeDirectory(
      0, DFSUtil.string2Bytes("foo"), perm, 0L);
  snapshottable.addSnapshottableFeature();
  // Two pairs of equal snapshots; ids were assigned in the same order as
  // the root names, so name order can serve as the expected id order.
  final Snapshot[] snapshots = {
      new Snapshot(1, "s1", snapshottable),
      new Snapshot(1, "s1", snapshottable),
      new Snapshot(2, "s2", snapshottable),
      new Snapshot(2, "s2", snapshottable)
  };

  // null compares equal to null and greater than any snapshot.
  Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
  for (Snapshot left : snapshots) {
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, left) > 0);
    Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(left, null) < 0);
    for (Snapshot right : snapshots) {
      // The comparator's sign must agree with the root-name ordering.
      final int expected = left.getRoot().getLocalName()
          .compareTo(right.getRoot().getLocalName());
      final int actual = Snapshot.ID_COMPARATOR.compare(left, right);
      Assert.assertEquals(expected > 0, actual > 0);
      Assert.assertEquals(expected == 0, actual == 0);
      Assert.assertEquals(expected < 0, actual < 0);
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestRenameWithSnapshots

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Renaming a snapshottable directory that already has snapshots must be
// rejected with a message naming the directory. Renaming a file OUT of such
// a directory is allowed and converts the file's inode into a reference
// pair: a WithName reference preserved under the s0 snapshot path and a
// reference at the new location, both sharing one WithCount whose count is
// 2 until the destination copy is deleted, after which it drops to 1.
@Test(timeout=300000) public void testRenameFromSDir2NonSDir() throws Exception { final String dirStr="/testRenameWithSnapshot"; final String abcStr=dirStr + "/abc"; final Path abc=new Path(abcStr); hdfs.mkdirs(abc,new FsPermission((short)0777)); hdfs.allowSnapshot(abc); final Path foo=new Path(abc,"foo"); DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED); hdfs.createSnapshot(abc,"s0"); try { hdfs.rename(abc,new Path(dirStr,"tmp")); fail("Expect exception since " + abc + " is snapshottable and already has snapshots"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots",e); } final String xyzStr=dirStr + "/xyz"; final Path xyz=new Path(xyzStr); hdfs.mkdirs(xyz,new FsPermission((short)0777)); final Path bar=new Path(xyz,"bar"); hdfs.rename(foo,bar); final INode fooRef=fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc,"s0","foo").toString()); Assert.assertTrue(fooRef.isReference()); Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName); final INodeReference.WithCount withCount=(INodeReference.WithCount)fooRef.asReference().getReferredINode(); Assert.assertEquals(2,withCount.getReferenceCount()); final INode barRef=fsdir.getINode(bar.toString()); Assert.assertTrue(barRef.isReference()); Assert.assertSame(withCount,barRef.asReference().getReferredINode()); hdfs.delete(bar,false); Assert.assertEquals(1,withCount.getReferenceCount()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Scenario (HDFS-5476): snapshot s0, rename foo -> foo2, add a file under
// the moved bar, snapshot s1, then delete bar and foo2. Deleting snapshot
// s1 must clean the whole subtree under the DstReference: the file captured
// only in s1 disappears, while the s0 copy of bar survives with no children
// and a single diff whose created/deleted lists are both empty. The fsimage
// is also verified to round-trip via restartClusterAndCheckImage.
/** * Make sure we clean the whole subtree under a DstReference node after * deleting a snapshot. * see HDFS-5476. */ @Test public void testCleanDstReference() throws Exception { final Path test=new Path("/test"); final Path foo=new Path(test,"foo"); final Path bar=new Path(foo,"bar"); hdfs.mkdirs(bar); SnapshotTestHelper.createSnapshot(hdfs,test,"s0"); final Path fileInBar=new Path(bar,"file"); DFSTestUtil.createFile(hdfs,fileInBar,BLOCKSIZE,REPL,SEED); final Path foo2=new Path(test,"foo2"); hdfs.rename(foo,foo2); hdfs.createSnapshot(test,"s1"); hdfs.delete(new Path(foo2,"bar"),true); hdfs.delete(foo2,true); final Path sfileInBar=SnapshotTestHelper.getSnapshotPath(test,"s1","foo2/bar/file"); assertTrue(hdfs.exists(sfileInBar)); hdfs.deleteSnapshot(test,"s1"); assertFalse(hdfs.exists(sfileInBar)); restartClusterAndCheckImage(true); final Path barInS0=SnapshotTestHelper.getSnapshotPath(test,"s0","foo/bar"); INodeDirectory barNode=fsdir.getINode(barInS0.toString()).asDirectory(); assertEquals(0,barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size()); List diffList=barNode.getDiffs().asList(); assertEquals(1,diffList.size()); DirectoryDiff diff=diffList.get(0); assertEquals(0,diff.getChildrenDiff().getList(ListType.DELETED).size()); assertEquals(0,diff.getChildrenDiff().getList(ListType.CREATED).size()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Failure-injection test: a Mockito spy replaces dir2 so that addChild
// returns false, forcing rename(foo -> /dir2/foo) to fail mid-flight. The
// test then verifies the namesystem fully undoes the partial rename:
// foo (created BEFORE snapshot s1) is back as dir1's only child, dir1's s1
// diff records no created/deleted children, the snapshot path
// /dir1/.snapshot/s1/foo resolves to the SAME inode as the live foo, and
// dir2 is untouched (no snapshot feature, only its original file).
/** * Test the undo section of rename. Before the rename, we create the renamed * file/dir before taking the snapshot. */ @Test public void testRenameUndo_1() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); final Path dir2file=new Path(sdir2,"file"); DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory(); INodeDirectory mockDir2=spy(dir2); doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt()); INodeDirectory root=fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir2,mockDir2,fsdir.getINodeMap()); final Path newfoo=new Path(sdir2,"foo"); boolean result=hdfs.rename(foo,newfoo); assertFalse(result); INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory(); Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,dir1Children.size()); assertEquals(foo.getName(),dir1Children.get(0).getLocalName()); List dir1Diffs=dir1Node.getDiffs().asList(); assertEquals(1,dir1Diffs.size()); assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId()); ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff(); assertEquals(0,childrenDiff.getList(ListType.DELETED).size()); assertEquals(0,childrenDiff.getList(ListType.CREATED).size()); INode fooNode=fsdir.getINode4Write(foo.toString()); assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot()); List fooDiffs=fooNode.asDirectory().getDiffs().asList(); assertEquals(1,fooDiffs.size()); assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId()); final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo"); INode 
fooNode_s1=fsdir.getINode(foo_s1.toString()); assertTrue(fooNode_s1 == fooNode); assertFalse(hdfs.exists(newfoo)); INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory(); assertFalse(dir2Node.isWithSnapshot()); ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,dir2Children.size()); assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// HDFS-4842 regression: delete a file captured in snapshots s0/s1, take a
// nested snapshot s2 on dir2 (nested snapshots enabled), rename foo into
// dir1, then delete s1 on the ancestor. The file must NOT be visible via s2
// (it was already deleted when s2 was taken) but must remain reachable via
// s0. The rename shows up as exactly one CREATED entry in dir1's single
// diff (the same inode as the live foo reference) and one DELETED WithName
// reference in dir2's diff, both sharing the same referred inode. bar's one
// diff is pinned at s0 and records the deletion of "file".
/** * Unit test for HDFS-4842. */ @Test public void testRenameDirAndDeleteSnapshot_7() throws Exception { fsn.getSnapshotManager().setAllowNestedSnapshots(true); final Path test=new Path("/test"); final Path dir1=new Path(test,"dir1"); final Path dir2=new Path(test,"dir2"); hdfs.mkdirs(dir1); hdfs.mkdirs(dir2); final Path foo=new Path(dir2,"foo"); final Path bar=new Path(foo,"bar"); final Path file=new Path(bar,"file"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,test,"s0"); SnapshotTestHelper.createSnapshot(hdfs,test,"s1"); hdfs.delete(file,true); SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2"); final Path newfoo=new Path(dir1,foo.getName()); hdfs.rename(foo,newfoo); hdfs.deleteSnapshot(test,"s1"); final Path file_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2","foo/bar/file"); assertFalse(hdfs.exists(file_s2)); final Path file_s0=SnapshotTestHelper.getSnapshotPath(test,"s0","dir2/foo/bar/file"); assertTrue(hdfs.exists(file_s0)); INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory(); List dir1DiffList=dir1Node.getDiffs().asList(); assertEquals(1,dir1DiffList.size()); List dList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.DELETED); assertTrue(dList.isEmpty()); List cList=dir1DiffList.get(0).getChildrenDiff().getList(ListType.CREATED); assertEquals(1,cList.size()); INode cNode=cList.get(0); INode fooNode=fsdir.getINode4Write(newfoo.toString()); assertSame(cNode,fooNode); final Path newbar=new Path(newfoo,bar.getName()); INodeDirectory barNode=fsdir.getINode4Write(newbar.toString()).asDirectory(); assertSame(fooNode.asDirectory(),barNode.getParent()); List barDiffList=barNode.getDiffs().asList(); assertEquals(1,barDiffList.size()); DirectoryDiff diff=barDiffList.get(0); INodeDirectory testNode=fsdir.getINode4Write(test.toString()).asDirectory(); Snapshot s0=testNode.getSnapshot(DFSUtil.string2Bytes("s0")); assertEquals(s0.getId(),diff.getSnapshotId()); 
assertEquals("file",diff.getChildrenDiff().getList(ListType.DELETED).get(0).getLocalName()); INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory(); List dir2DiffList=dir2Node.getDiffs().asList(); assertEquals(1,dir2DiffList.size()); dList=dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED); assertEquals(1,dList.size()); final Path foo_s2=SnapshotTestHelper.getSnapshotPath(dir2,"s2",foo.getName()); INodeReference.WithName fooNode_s2=(INodeReference.WithName)fsdir.getINode(foo_s2.toString()); assertSame(dList.get(0),fooNode_s2); assertSame(fooNode.asReference().getReferredINode(),fooNode_s2.getReferredINode()); restartClusterAndCheckImage(true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Same failure-injection as testRenameUndo_1 (spy dir2 whose addChild
// returns false), but here foo is created AFTER snapshot s1 was taken.
// After the undone rename, foo must therefore appear in dir1's s1 diff as
// a CREATED entry (not restored from the snapshot), the snapshot path
// /dir1/.snapshot/s1/foo must NOT exist, and dir2 again stays untouched
// with only its original file.
/** * Test the undo section of rename. Before the rename, we create the renamed * file/dir after taking the snapshot. */ @Test public void testRenameUndo_2() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path dir2file=new Path(sdir2,"file"); DFSTestUtil.createFile(hdfs,dir2file,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); INodeDirectory dir2=fsdir.getINode4Write(sdir2.toString()).asDirectory(); INodeDirectory mockDir2=spy(dir2); doReturn(false).when(mockDir2).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt()); INodeDirectory root=fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir2,mockDir2,fsdir.getINodeMap()); final Path newfoo=new Path(sdir2,"foo"); boolean result=hdfs.rename(foo,newfoo); assertFalse(result); INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory(); Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); ReadOnlyList dir1Children=dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,dir1Children.size()); assertEquals(foo.getName(),dir1Children.get(0).getLocalName()); List dir1Diffs=dir1Node.getDiffs().asList(); assertEquals(1,dir1Diffs.size()); assertEquals(s1.getId(),dir1Diffs.get(0).getSnapshotId()); ChildrenDiff childrenDiff=dir1Diffs.get(0).getChildrenDiff(); assertEquals(0,childrenDiff.getList(ListType.DELETED).size()); assertEquals(1,childrenDiff.getList(ListType.CREATED).size()); INode fooNode=fsdir.getINode4Write(foo.toString()); assertTrue(fooNode instanceof INodeDirectory); assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode); final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo"); assertFalse(hdfs.exists(foo_s1)); assertFalse(hdfs.exists(newfoo)); INodeDirectory 
dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory(); assertFalse(dir2Node.isWithSnapshot()); ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,dir2Children.size()); assertEquals(dir2file.getName(),dir2Children.get(0).getLocalName()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// All snapshots (s1/s2/s3) are taken BEFORE the renames, so the renamed
// foo and bar are visible only via s1 and keep the replication recorded at
// s1 no matter what setReplication calls happen at their later locations.
// After the full round trip dir1 -> dir2 -> dir3 -> dir2 -> dir1, each of
// foo and bar is a reference with WithCount 2 (live copy + the s1 WithName)
// carrying exactly one diff pinned at s1. Deleting the live copies drops
// each reference count to 1, leaving only the s1 snapshot copies. The
// fsimage is round-tripped repeatedly via restartClusterAndCheckImage.
/** * Test rename a dir and a file multiple times across snapshottable * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo * Only create snapshots in the beginning (before the rename). */ @Test public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path sdir3=new Path("/dir3"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); hdfs.mkdirs(sdir3); final Path foo_dir1=new Path(sdir1,"foo"); final Path bar1_dir1=new Path(foo_dir1,"bar1"); final Path bar2_dir1=new Path(sdir1,"bar"); DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED); DFSTestUtil.createFile(hdfs,bar2_dir1,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3"); final Path foo_dir2=new Path(sdir2,"foo"); hdfs.rename(foo_dir1,foo_dir2); final Path bar2_dir2=new Path(sdir2,"bar"); hdfs.rename(bar2_dir1,bar2_dir2); restartClusterAndCheckImage(true); final Path bar1_dir2=new Path(foo_dir2,"bar1"); hdfs.setReplication(bar1_dir2,REPL_1); hdfs.setReplication(bar2_dir2,REPL_1); final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1"); final Path bar2_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar"); final Path bar1_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar1"); final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","bar"); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar2_s1)); assertFalse(hdfs.exists(bar1_s2)); assertFalse(hdfs.exists(bar2_s2)); FileStatus statusBar1=hdfs.getFileStatus(bar1_s1); assertEquals(REPL,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_dir2); assertEquals(REPL_1,statusBar1.getReplication()); FileStatus statusBar2=hdfs.getFileStatus(bar2_s1); assertEquals(REPL,statusBar2.getReplication()); statusBar2=hdfs.getFileStatus(bar2_dir2); 
assertEquals(REPL_1,statusBar2.getReplication()); final Path foo_dir3=new Path(sdir3,"foo"); hdfs.rename(foo_dir2,foo_dir3); final Path bar2_dir3=new Path(sdir3,"bar"); hdfs.rename(bar2_dir2,bar2_dir3); restartClusterAndCheckImage(true); final Path bar1_dir3=new Path(foo_dir3,"bar1"); hdfs.setReplication(bar1_dir3,REPL_2); hdfs.setReplication(bar2_dir3,REPL_2); final Path bar1_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","foo/bar1"); final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir3,"s3","bar"); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar2_s1)); assertFalse(hdfs.exists(bar1_s2)); assertFalse(hdfs.exists(bar2_s2)); assertFalse(hdfs.exists(bar1_s3)); assertFalse(hdfs.exists(bar2_s3)); statusBar1=hdfs.getFileStatus(bar1_s1); assertEquals(REPL,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_dir3); assertEquals(REPL_2,statusBar1.getReplication()); statusBar2=hdfs.getFileStatus(bar2_s1); assertEquals(REPL,statusBar2.getReplication()); statusBar2=hdfs.getFileStatus(bar2_dir3); assertEquals(REPL_2,statusBar2.getReplication()); hdfs.rename(foo_dir3,foo_dir2); hdfs.rename(bar2_dir3,bar2_dir2); restartClusterAndCheckImage(true); hdfs.setReplication(bar1_dir2,REPL); hdfs.setReplication(bar2_dir2,REPL); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar2_s1)); assertFalse(hdfs.exists(bar1_s2)); assertFalse(hdfs.exists(bar2_s2)); assertFalse(hdfs.exists(bar1_s3)); assertFalse(hdfs.exists(bar2_s3)); statusBar1=hdfs.getFileStatus(bar1_s1); assertEquals(REPL,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_dir2); assertEquals(REPL,statusBar1.getReplication()); statusBar2=hdfs.getFileStatus(bar2_s1); assertEquals(REPL,statusBar2.getReplication()); statusBar2=hdfs.getFileStatus(bar2_dir2); assertEquals(REPL,statusBar2.getReplication()); hdfs.rename(foo_dir2,foo_dir1); hdfs.rename(bar2_dir2,bar2_dir1); INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference(); INodeReference.WithCount 
fooWithCount=(WithCount)fooRef.getReferredINode(); assertEquals(2,fooWithCount.getReferenceCount()); INodeDirectory foo=fooWithCount.asDirectory(); assertEquals(1,foo.getDiffs().asList().size()); INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory(); Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); assertEquals(s1.getId(),foo.getDirectoryWithSnapshotFeature().getLastSnapshotId()); INodeFile bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile(); assertEquals(1,bar1.getDiffs().asList().size()); assertEquals(s1.getId(),bar1.getDiffs().getLastSnapshotId()); INodeReference barRef=fsdir.getINode4Write(bar2_dir1.toString()).asReference(); INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode(); assertEquals(2,barWithCount.getReferenceCount()); INodeFile bar=barWithCount.asFile(); assertEquals(1,bar.getDiffs().asList().size()); assertEquals(s1.getId(),bar.getDiffs().getLastSnapshotId()); restartClusterAndCheckImage(true); hdfs.delete(foo_dir1,true); hdfs.delete(bar2_dir1,true); restartClusterAndCheckImage(true); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar2_s1)); assertFalse(hdfs.exists(bar1_s2)); assertFalse(hdfs.exists(bar2_s2)); assertFalse(hdfs.exists(bar1_s3)); assertFalse(hdfs.exists(bar2_s3)); assertFalse(hdfs.exists(foo_dir1)); assertFalse(hdfs.exists(bar1_dir1)); assertFalse(hdfs.exists(bar2_dir1)); statusBar1=hdfs.getFileStatus(bar1_s1); assertEquals(REPL,statusBar1.getReplication()); statusBar2=hdfs.getFileStatus(bar2_s1); assertEquals(REPL,statusBar2.getReplication()); final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo"); fooRef=fsdir.getINode(foo_s1.toString()).asReference(); fooWithCount=(WithCount)fooRef.getReferredINode(); assertEquals(1,fooWithCount.getReferenceCount()); barRef=fsdir.getINode(bar2_s1.toString()).asReference(); barWithCount=(WithCount)barRef.getReferredINode(); assertEquals(1,barWithCount.getReferenceCount()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Second-time rename failure: foo is first renamed into dir2 (becoming a
// DstReference recorded as a CREATED entry in dir2's s2 diff), then a
// rename into the spy-stubbed dir3 fails. The undo must keep the
// DstReference as dir2's only child and in the s2 created list, while
// foo2 stays invisible under s2 (it was created after s2). After taking
// s3 and failing the rename a second time, the reference becomes visible
// under the s3 snapshot path, dir2 gains an s3 diff with empty child
// lists, and the reference's diff list grows to two entries (s1 and s3).
/** * Test the undo section of the second-time rename. */ @Test public void testRenameUndo_3() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path sdir3=new Path("/dir3"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); hdfs.mkdirs(sdir3); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory(); INodeDirectory mockDir3=spy(dir3); doReturn(false).when(mockDir3).addChild((INode)anyObject(),anyBoolean(),Mockito.anyInt()); INodeDirectory root=fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir3,mockDir3,fsdir.getINodeMap()); final Path foo_dir2=new Path(sdir2,"foo2"); final Path foo_dir3=new Path(sdir3,"foo3"); hdfs.rename(foo,foo_dir2); boolean result=hdfs.rename(foo_dir2,foo_dir3); assertFalse(result); INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory(); Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory(); Snapshot s2=dir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); ReadOnlyList dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,dir2Children.size()); List dir2Diffs=dir2Node.getDiffs().asList(); assertEquals(1,dir2Diffs.size()); assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId()); ChildrenDiff childrenDiff=dir2Diffs.get(0).getChildrenDiff(); assertEquals(0,childrenDiff.getList(ListType.DELETED).size()); assertEquals(1,childrenDiff.getList(ListType.CREATED).size()); final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo2"); assertFalse(hdfs.exists(foo_s2)); INode fooNode=fsdir.getINode4Write(foo_dir2.toString()); assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode); assertTrue(fooNode instanceof 
INodeReference.DstReference); List fooDiffs=fooNode.asDirectory().getDiffs().asList(); assertEquals(1,fooDiffs.size()); assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId()); hdfs.createSnapshot(sdir2,"s3"); result=hdfs.rename(foo_dir2,foo_dir3); assertFalse(result); dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory(); Snapshot s3=dir2Node.getSnapshot(DFSUtil.string2Bytes("s3")); fooNode=fsdir.getINode4Write(foo_dir2.toString()); dir2Children=dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,dir2Children.size()); dir2Diffs=dir2Node.getDiffs().asList(); assertEquals(2,dir2Diffs.size()); assertEquals(s2.getId(),dir2Diffs.get(0).getSnapshotId()); assertEquals(s3.getId(),dir2Diffs.get(1).getSnapshotId()); childrenDiff=dir2Diffs.get(0).getChildrenDiff(); assertEquals(0,childrenDiff.getList(ListType.DELETED).size()); assertEquals(1,childrenDiff.getList(ListType.CREATED).size()); assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode); childrenDiff=dir2Diffs.get(1).getChildrenDiff(); assertEquals(0,childrenDiff.getList(ListType.DELETED).size()); assertEquals(0,childrenDiff.getList(ListType.CREATED).size()); final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo2"); assertFalse(hdfs.exists(foo_s2)); assertTrue(hdfs.exists(foo_s3)); assertTrue(fooNode instanceof INodeReference.DstReference); fooDiffs=fooNode.asDirectory().getDiffs().asList(); assertEquals(2,fooDiffs.size()); assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId()); assertEquals(s3.getId(),fooDiffs.get(1).getSnapshotId()); }

UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Undo when the rename destination being overwritten is itself a reference
// node (foo3, previously renamed from foo2). The spy stubs dir3.addChild so
// the first non-null insert fails (thenCallRealMethod restores normal
// behavior afterwards), making rename(foo, foo3, OVERWRITE) throw. The undo
// must leave foo3 resolving to the very same reference node, keep both
// entries in its WithCount (count 2), and restore foo3 as the WithCount's
// parent reference.
/** * Test undo where dst node being overwritten is a reference node */ @Test public void testRenameUndo_4() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path sdir3=new Path("/dir3"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); hdfs.mkdirs(sdir3); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); final Path foo2=new Path(sdir2,"foo2"); hdfs.mkdirs(foo2); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); final Path foo3=new Path(sdir3,"foo3"); hdfs.rename(foo2,foo3); INode foo3Node=fsdir.getINode4Write(foo3.toString()); assertTrue(foo3Node.isReference()); INodeDirectory dir3=fsdir.getINode4Write(sdir3.toString()).asDirectory(); INodeDirectory mockDir3=spy(dir3); doReturn(false).when(mockDir3).addChild((INode)Mockito.isNull(),anyBoolean(),Mockito.anyInt()); Mockito.when(mockDir3.addChild((INode)Mockito.isNotNull(),anyBoolean(),Mockito.anyInt())).thenReturn(false).thenCallRealMethod(); INodeDirectory root=fsdir.getINode4Write("/").asDirectory(); root.replaceChild(dir3,mockDir3,fsdir.getINodeMap()); foo3Node.setParent(mockDir3); try { hdfs.rename(foo,foo3,Rename.OVERWRITE); fail("the rename from " + foo + " to "+ foo3+ " should fail"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("rename from " + foo + " to "+ foo3+ " failed.",e); } final INode foo3Node_undo=fsdir.getINode4Write(foo3.toString()); assertSame(foo3Node,foo3Node_undo); INodeReference.WithCount foo3_wc=(WithCount)foo3Node.asReference().getReferredINode(); assertEquals(2,foo3_wc.getReferenceCount()); assertSame(foo3Node,foo3_wc.getParentReference()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Rename foo into dir2, add bar2/bar3 there, snapshot dir2 as s3, rename
// foo back, then delete s3. Only the s3 state under the renamed dir may be
// cleaned: dir1's namespace quota usage ends at 9 (dir + snapshot overhead
// + foo subtree) while dir2 drops back to 2. The s1 copy of foo is a
// WithName reference sharing one WithCount (count 2) with the current
// DstReference, and foo retains a single diff pinned at s1 whose children
// diff holds two CREATED entries (bar2, bar3) and no deletions. Current foo
// lists all three bars. NOTE(review): the exact quota figures (9 and 2)
// encode per-inode namespace accounting; confirm against
// DirectoryWithQuotaFeature if they drift.
/** * After the following operations: * Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir * again -> delete snapshot s on dst tree * Make sure we only delete the snapshot s under the renamed dir. */ @Test public void testRenameDirAndDeleteSnapshot_4() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); hdfs.mkdirs(sdir2); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); final Path foo2=new Path(sdir2,"foo"); hdfs.rename(foo,foo2); final Path bar2=new Path(foo2,"bar2"); DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED); final Path bar3=new Path(foo2,"bar3"); DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED); hdfs.createSnapshot(sdir2,"s3"); hdfs.rename(foo2,foo); hdfs.deleteSnapshot(sdir2,"s3"); final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory(); Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(9,q1.get(Quota.NAMESPACE)); final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory(); Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(2,q2.get(Quota.NAMESPACE)); final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName()); final INode fooRef=fsdir.getINode(foo_s1.toString()); assertTrue(fooRef instanceof INodeReference.WithName); INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode(); assertEquals(2,wc.getReferenceCount()); INodeDirectory fooNode=wc.getReferredINode().asDirectory(); ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(3,children.size()); assertEquals(bar.getName(),children.get(0).getLocalName()); assertEquals(bar2.getName(),children.get(1).getLocalName()); 
assertEquals(bar3.getName(),children.get(2).getLocalName()); List diffList=fooNode.getDiffs().asList(); assertEquals(1,diffList.size()); Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); assertEquals(s1.getId(),diffList.get(0).getSnapshotId()); ChildrenDiff diff=diffList.get(0).getChildrenDiff(); assertEquals(2,diff.getList(ListType.CREATED).size()); assertEquals(0,diff.getList(ListType.DELETED).size()); final INode fooRef2=fsdir.getINode4Write(foo.toString()); assertTrue(fooRef2 instanceof INodeReference.DstReference); INodeReference.WithCount wc2=(WithCount)fooRef2.asReference().getReferredINode(); assertSame(wc,wc2); assertSame(fooRef2,wc.getParentReference()); restartClusterAndCheckImage(true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Variant of the multi-rename round trip that takes fresh snapshots of all
// three dirs after every rename. Each rename out of a snapshotted location
// adds a WithName reference, so after the final rename back to dir1 the
// WithCount for both foo and bar is 5 (live copy + s1/s22/s333/s2222), and
// their diff lists hold one entry per snapshot generation in ascending id
// order (s1, s22, s333, s2222). Replication seen through each snapshot is
// whatever was in effect when that snapshot was taken. Deleting the live
// copies drops the counts to 4; the files remain visible in every snapshot
// generation except s1111, which was taken on dir1 while they lived
// elsewhere.
/** * Test rename a dir multiple times across snapshottable directories: * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo * Create snapshots after each rename. */ @Test public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path sdir3=new Path("/dir3"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); hdfs.mkdirs(sdir3); final Path foo_dir1=new Path(sdir1,"foo"); final Path bar1_dir1=new Path(foo_dir1,"bar1"); final Path bar_dir1=new Path(sdir1,"bar"); DFSTestUtil.createFile(hdfs,bar1_dir1,BLOCKSIZE,REPL,SEED); DFSTestUtil.createFile(hdfs,bar_dir1,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s3"); final Path foo_dir2=new Path(sdir2,"foo"); hdfs.rename(foo_dir1,foo_dir2); final Path bar_dir2=new Path(sdir2,"bar"); hdfs.rename(bar_dir1,bar_dir2); final Path bar1_dir2=new Path(foo_dir2,"bar1"); hdfs.setReplication(bar1_dir2,REPL_1); hdfs.setReplication(bar_dir2,REPL_1); restartClusterAndCheckImage(true); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s11"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s22"); SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s33"); final Path foo_dir3=new Path(sdir3,"foo"); hdfs.rename(foo_dir2,foo_dir3); final Path bar_dir3=new Path(sdir3,"bar"); hdfs.rename(bar_dir2,bar_dir3); final Path bar1_dir3=new Path(foo_dir3,"bar1"); hdfs.setReplication(bar1_dir3,REPL_2); hdfs.setReplication(bar_dir3,REPL_2); restartClusterAndCheckImage(true); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s111"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s222"); SnapshotTestHelper.createSnapshot(hdfs,sdir3,"s333"); final Path bar1_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","foo/bar1"); final Path bar1_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","foo/bar1"); final Path 
bar1_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","foo/bar1"); final Path bar_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1","bar"); final Path bar_s22=SnapshotTestHelper.getSnapshotPath(sdir2,"s22","bar"); final Path bar_s333=SnapshotTestHelper.getSnapshotPath(sdir3,"s333","bar"); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar1_s22)); assertTrue(hdfs.exists(bar1_s333)); assertTrue(hdfs.exists(bar_s1)); assertTrue(hdfs.exists(bar_s22)); assertTrue(hdfs.exists(bar_s333)); FileStatus statusBar1=hdfs.getFileStatus(bar1_s1); assertEquals(REPL,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_dir3); assertEquals(REPL_2,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_s22); assertEquals(REPL_1,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_s333); assertEquals(REPL_2,statusBar1.getReplication()); FileStatus statusBar=hdfs.getFileStatus(bar_s1); assertEquals(REPL,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_dir3); assertEquals(REPL_2,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_s22); assertEquals(REPL_1,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_s333); assertEquals(REPL_2,statusBar.getReplication()); hdfs.rename(foo_dir3,foo_dir2); hdfs.rename(bar_dir3,bar_dir2); hdfs.setReplication(bar1_dir2,REPL); hdfs.setReplication(bar_dir2,REPL); restartClusterAndCheckImage(true); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1111"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2222"); final Path bar1_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo/bar1"); final Path bar_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","bar"); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar1_s22)); assertTrue(hdfs.exists(bar1_s333)); assertTrue(hdfs.exists(bar1_s2222)); assertTrue(hdfs.exists(bar_s1)); assertTrue(hdfs.exists(bar_s22)); assertTrue(hdfs.exists(bar_s333)); assertTrue(hdfs.exists(bar_s2222)); statusBar1=hdfs.getFileStatus(bar1_s1); 
assertEquals(REPL,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_dir2); assertEquals(REPL,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_s22); assertEquals(REPL_1,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_s333); assertEquals(REPL_2,statusBar1.getReplication()); statusBar1=hdfs.getFileStatus(bar1_s2222); assertEquals(REPL,statusBar1.getReplication()); statusBar=hdfs.getFileStatus(bar_s1); assertEquals(REPL,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_dir2); assertEquals(REPL,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_s22); assertEquals(REPL_1,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_s333); assertEquals(REPL_2,statusBar.getReplication()); statusBar=hdfs.getFileStatus(bar_s2222); assertEquals(REPL,statusBar.getReplication()); hdfs.rename(foo_dir2,foo_dir1); hdfs.rename(bar_dir2,bar_dir1); INodeDirectory sdir1Node=fsdir.getINode(sdir1.toString()).asDirectory(); INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory(); INodeDirectory sdir3Node=fsdir.getINode(sdir3.toString()).asDirectory(); INodeReference fooRef=fsdir.getINode4Write(foo_dir1.toString()).asReference(); INodeReference.WithCount fooWithCount=(WithCount)fooRef.getReferredINode(); assertEquals(5,fooWithCount.getReferenceCount()); INodeDirectory foo=fooWithCount.asDirectory(); List fooDiffs=foo.getDiffs().asList(); assertEquals(4,fooDiffs.size()); Snapshot s2222=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222")); Snapshot s333=sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333")); Snapshot s22=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22")); Snapshot s1=sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId()); assertEquals(s333.getId(),fooDiffs.get(2).getSnapshotId()); assertEquals(s22.getId(),fooDiffs.get(1).getSnapshotId()); assertEquals(s1.getId(),fooDiffs.get(0).getSnapshotId()); INodeFile 
bar1=fsdir.getINode4Write(bar1_dir1.toString()).asFile(); List bar1Diffs=bar1.getDiffs().asList(); assertEquals(3,bar1Diffs.size()); assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId()); assertEquals(s22.getId(),bar1Diffs.get(1).getSnapshotId()); assertEquals(s1.getId(),bar1Diffs.get(0).getSnapshotId()); INodeReference barRef=fsdir.getINode4Write(bar_dir1.toString()).asReference(); INodeReference.WithCount barWithCount=(WithCount)barRef.getReferredINode(); assertEquals(5,barWithCount.getReferenceCount()); INodeFile bar=barWithCount.asFile(); List barDiffs=bar.getDiffs().asList(); assertEquals(4,barDiffs.size()); assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId()); assertEquals(s333.getId(),barDiffs.get(2).getSnapshotId()); assertEquals(s22.getId(),barDiffs.get(1).getSnapshotId()); assertEquals(s1.getId(),barDiffs.get(0).getSnapshotId()); restartClusterAndCheckImage(true); hdfs.delete(foo_dir1,true); hdfs.delete(bar_dir1,true); restartClusterAndCheckImage(true); final Path bar1_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","foo/bar1"); final Path bar_s1111=SnapshotTestHelper.getSnapshotPath(sdir1,"s1111","bar"); assertTrue(hdfs.exists(bar1_s1)); assertTrue(hdfs.exists(bar1_s22)); assertTrue(hdfs.exists(bar1_s333)); assertTrue(hdfs.exists(bar1_s2222)); assertFalse(hdfs.exists(bar1_s1111)); assertTrue(hdfs.exists(bar_s1)); assertTrue(hdfs.exists(bar_s22)); assertTrue(hdfs.exists(bar_s333)); assertTrue(hdfs.exists(bar_s2222)); assertFalse(hdfs.exists(bar_s1111)); final Path foo_s2222=SnapshotTestHelper.getSnapshotPath(sdir2,"s2222","foo"); fooRef=fsdir.getINode(foo_s2222.toString()).asReference(); fooWithCount=(WithCount)fooRef.getReferredINode(); assertEquals(4,fooWithCount.getReferenceCount()); foo=fooWithCount.asDirectory(); fooDiffs=foo.getDiffs().asList(); assertEquals(4,fooDiffs.size()); assertEquals(s2222.getId(),fooDiffs.get(3).getSnapshotId()); bar1Diffs=bar1.getDiffs().asList(); assertEquals(3,bar1Diffs.size()); 
assertEquals(s333.getId(),bar1Diffs.get(2).getSnapshotId()); barRef=fsdir.getINode(bar_s2222.toString()).asReference(); barWithCount=(WithCount)barRef.getReferredINode(); assertEquals(4,barWithCount.getReferenceCount()); bar=barWithCount.asFile(); barDiffs=bar.getDiffs().asList(); assertEquals(4,barDiffs.size()); assertEquals(s2222.getId(),barDiffs.get(3).getSnapshotId()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 *
 * <p>dir2's namespace quota is set to 5 so that renaming dir1/foo under
 * dir2/subdir2 must fail; the test then verifies the failed rename was fully
 * undone: source children/parents, both directories' snapshot diff lists,
 * and dir2's quota usage are all unchanged.
 */
@Test public void testRenameUndo_5() throws Exception {
// Setup: /test/dir1/foo/bar and /test/dir2/subdir2; snapshot both dirs,
// then cap dir2's namespace quota so the upcoming rename cannot fit.
final Path test=new Path("/test"); final Path dir1=new Path(test,"dir1"); final Path dir2=new Path(test,"dir2"); final Path subdir2=new Path(dir2,"subdir2"); hdfs.mkdirs(dir1); hdfs.mkdirs(subdir2); final Path foo=new Path(dir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2"); hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1);
// The rename returns false (quota would be exceeded) and the source
// file/dir still exist.
final Path foo2=new Path(subdir2,foo.getName()); boolean rename=hdfs.rename(foo,foo2); assertFalse(rename); assertTrue(hdfs.exists(foo)); assertTrue(hdfs.exists(bar));
// Source tree after undo: foo is still dir1's only child, bar's parent is
// foo, and dir1's single snapshot diff recorded no created/deleted children.
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory(); List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1,childrenList.size()); INode fooNode=childrenList.get(0); assertTrue(fooNode.asDirectory().isWithSnapshot()); INode barNode=fsdir.getINode4Write(bar.toString()); assertTrue(barNode.getClass() == INodeFile.class); assertSame(fooNode,barNode.getParent()); List diffList=dir1Node.getDiffs().asList(); assertEquals(1,diffList.size()); DirectoryDiff diff=diffList.get(0); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination tree after undo: quota usage (3 namespace items, 0 disk),
// child list, parent links, and dir2's snapshot diff are untouched.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory(); assertTrue(dir2Node.isSnapshottable()); Quota.Counts counts=dir2Node.computeQuotaUsage(); assertEquals(3,counts.get(Quota.NAMESPACE)); assertEquals(0,counts.get(Quota.DISKSPACE)); childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1,childrenList.size()); INode subdir2Node=childrenList.get(0); assertSame(dir2Node,subdir2Node.getParent());
assertSame(subdir2Node,fsdir.getINode4Write(subdir2.toString())); diffList=dir2Node.getDiffs().asList(); assertEquals(1,diffList.size()); diff=diffList.get(0); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After the following operations:
 * Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
 * -> delete snapshot s on dst tree
 * Make sure we destroy everything created after the rename under the renamed
 * dir.
 */
@Test public void testRenameDirAndDeleteSnapshot_3() throws Exception {
// Setup: /dir1/foo/bar; snapshot both top dirs, rename foo into /dir2,
// create bar2/bar3 under the renamed dir, snapshot s3, then delete the
// renamed dir and snapshot s3 — bar2/bar3 must be destroyed with s3.
final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); hdfs.mkdirs(sdir2); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); final Path foo2=new Path(sdir2,"foo"); hdfs.rename(foo,foo2); final Path bar2=new Path(foo2,"bar2"); DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED); final Path bar3=new Path(foo2,"bar3"); DFSTestUtil.createFile(hdfs,bar3,BLOCKSIZE,REPL,SEED); hdfs.createSnapshot(sdir2,"s3"); hdfs.delete(foo2,true); hdfs.deleteSnapshot(sdir2,"s3");
// Quota check: dir1 still accounts for 4 namespace items (foo subtree kept
// alive by s1), dir2 only 2.
final INodeDirectory dir1Node=fsdir.getINode4Write(sdir1.toString()).asDirectory(); Quota.Counts q1=dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(4,q1.get(Quota.NAMESPACE)); final INodeDirectory dir2Node=fsdir.getINode4Write(sdir2.toString()).asDirectory(); Quota.Counts q2=dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(2,q2.get(Quota.NAMESPACE));
// foo is now only reachable through s1 as a WithName reference with a
// single remaining reference; only the original child bar survives.
final Path foo_s1=SnapshotTestHelper.getSnapshotPath(sdir1,"s1",foo.getName()); INode fooRef=fsdir.getINode(foo_s1.toString()); assertTrue(fooRef instanceof INodeReference.WithName); INodeReference.WithCount wc=(WithCount)fooRef.asReference().getReferredINode(); assertEquals(1,wc.getReferenceCount()); INodeDirectory fooNode=wc.getReferredINode().asDirectory(); ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,children.size()); assertEquals(bar.getName(),children.get(0).getLocalName()); List diffList=fooNode.getDiffs().asList(); assertEquals(1,diffList.size());
// The only remaining diff belongs to s1 and records no child changes;
// finally verify the fsimage round-trips.
Snapshot s1=dir1Node.getSnapshot(DFSUtil.string2Bytes("s1")); assertEquals(s1.getId(),diffList.get(0).getSnapshotId()); ChildrenDiff diff=diffList.get(0).getChildrenDiff(); assertEquals(0,diff.getList(ListType.CREATED).size()); assertEquals(0,diff.getList(ListType.DELETED).size()); restartClusterAndCheckImage(true); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test the rename undo when quota of dst tree is exceeded after rename. */ @Test public void testRenameExceedQuota() throws Exception { final Path test=new Path("/test"); final Path dir1=new Path(test,"dir1"); final Path dir2=new Path(test,"dir2"); final Path sub_dir2=new Path(dir2,"subdir"); final Path subfile_dir2=new Path(sub_dir2,"subfile"); hdfs.mkdirs(dir1); DFSTestUtil.createFile(hdfs,subfile_dir2,BLOCKSIZE,REPL,SEED); final Path foo=new Path(dir1,"foo"); DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2"); hdfs.setQuota(dir2,5,Long.MAX_VALUE - 1); hdfs.rename(foo,subfile_dir2,Rename.OVERWRITE); INode dir2Node=fsdir.getINode4Write(dir2.toString()); assertTrue(dir2Node.asDirectory().isSnapshottable()); Quota.Counts counts=dir2Node.computeQuotaUsage(); assertEquals(7,counts.get(Quota.NAMESPACE)); assertEquals(BLOCKSIZE * REPL * 2,counts.get(Quota.DISKSPACE)); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test the rename undo when removing dst node fails.
 *
 * <p>dir2's namespace quota is 4; the OVERWRITE rename of dir1/foo onto
 * dir2/subdir/subdir throws QuotaExceededException while recording the
 * snapshot modification, and the test verifies both trees are restored
 * exactly (children, parents, diff lists, quota usage).
 */
@Test public void testRenameUndo_6() throws Exception {
// Setup: /test/dir1/foo and /test/dir2/subdir/subdir, snapshot both dirs,
// then set dir2's namespace quota to 4.
final Path test=new Path("/test"); final Path dir1=new Path(test,"dir1"); final Path dir2=new Path(test,"dir2"); final Path sub_dir2=new Path(dir2,"subdir"); final Path subsub_dir2=new Path(sub_dir2,"subdir"); hdfs.mkdirs(dir1); hdfs.mkdirs(subsub_dir2); final Path foo=new Path(dir1,"foo"); hdfs.mkdirs(foo); SnapshotTestHelper.createSnapshot(hdfs,dir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,dir2,"s2"); hdfs.setQuota(dir2,4,Long.MAX_VALUE - 1);
// The rename must throw QuotaExceededException with the exact quota message.
try { hdfs.rename(foo,subsub_dir2,Rename.OVERWRITE); fail("Expect QuotaExceedException"); } catch ( QuotaExceededException e) { String msg="Failed to record modification for snapshot: " + "The NameSpace quota (directories and files)" + " is exceeded: quota=4 file count=5"; GenericTestUtils.assertExceptionContains(msg,e); } assertTrue(hdfs.exists(foo));
// Source tree after undo: foo is still dir1's only child and dir1's single
// snapshot diff recorded no child changes.
INodeDirectory dir1Node=fsdir.getINode4Write(dir1.toString()).asDirectory(); List childrenList=ReadOnlyList.Util.asList(dir1Node.getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1,childrenList.size()); INode fooNode=childrenList.get(0); assertTrue(fooNode.asDirectory().isWithSnapshot()); assertSame(dir1Node,fooNode.getParent()); List diffList=dir1Node.getDiffs().asList(); assertEquals(1,diffList.size()); DirectoryDiff diff=diffList.get(0); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// Destination tree after undo: quota usage is back to 4 namespace items,
// subdir/subdir structure and parent links are intact, and neither dir2's
// nor subdir's diff list records any change from the failed rename.
INodeDirectory dir2Node=fsdir.getINode4Write(dir2.toString()).asDirectory(); assertTrue(dir2Node.isSnapshottable()); Quota.Counts counts=dir2Node.computeQuotaUsage(); assertEquals(4,counts.get(Quota.NAMESPACE)); assertEquals(0,counts.get(Quota.DISKSPACE)); childrenList=ReadOnlyList.Util.asList(dir2Node.asDirectory().getChildrenList(Snapshot.CURRENT_STATE_ID)); assertEquals(1,childrenList.size()); INode subdir2Node=childrenList.get(0);
assertTrue(subdir2Node.asDirectory().isWithSnapshot()); assertSame(dir2Node,subdir2Node.getParent()); assertSame(subdir2Node,fsdir.getINode4Write(sub_dir2.toString())); INode subsubdir2Node=fsdir.getINode4Write(subsub_dir2.toString()); assertTrue(subsubdir2Node.getClass() == INodeDirectory.class); assertSame(subdir2Node,subsubdir2Node.getParent()); diffList=(dir2Node).getDiffs().asList(); assertEquals(1,diffList.size()); diff=diffList.get(0); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty()); diffList=subdir2Node.asDirectory().getDiffs().asList(); assertEquals(0,diffList.size()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** * Test rename where the src/dst directories are both snapshottable * directories without snapshots. In such case we need to update the * snapshottable dir list in SnapshotManager. */ @Test(timeout=60000) public void testRenameAndUpdateSnapshottableDirs() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); final Path foo=new Path(sdir1,"foo"); final Path bar=new Path(sdir2,"bar"); hdfs.mkdirs(foo); hdfs.mkdirs(bar); hdfs.allowSnapshot(foo); SnapshotTestHelper.createSnapshot(hdfs,bar,snap1); assertEquals(2,fsn.getSnapshottableDirListing().length); INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory(); long fooId=fooNode.getId(); try { hdfs.rename(foo,bar,Rename.OVERWRITE); fail("Expect exception since " + bar + " is snapshottable and already has snapshots"); } catch ( IOException e) { GenericTestUtils.assertExceptionContains(bar.toString() + " is snapshottable and already has snapshots",e); } hdfs.deleteSnapshot(bar,snap1); hdfs.rename(foo,bar,Rename.OVERWRITE); SnapshottableDirectoryStatus[] dirs=fsn.getSnapshottableDirListing(); assertEquals(1,dirs.length); assertEquals(bar,dirs[0].getFullPath()); assertEquals(fooId,dirs[0].getDirStatus().getFileId()); }

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/** * Test rename to an invalid name (xxx/.snapshot) */ @Test public void testRenameUndo_7() throws Exception { final Path root=new Path("/"); final Path foo=new Path(root,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,root,snap1); final Path invalid=new Path(foo,HdfsConstants.DOT_SNAPSHOT_DIR); try { hdfs.rename(bar,invalid); fail("expect exception since invalid name is used for rename"); } catch ( Exception e) { GenericTestUtils.assertExceptionContains("\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name",e); } INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); INodeDirectory fooNode=fsdir.getINode4Write(foo.toString()).asDirectory(); ReadOnlyList children=fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID); assertEquals(1,children.size()); List diffList=fooNode.getDiffs().asList(); assertEquals(1,diffList.size()); DirectoryDiff diff=diffList.get(0); Snapshot s1=rootNode.getSnapshot(DFSUtil.string2Bytes(snap1)); assertEquals(s1.getId(),diff.getSnapshotId()); assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty()); assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty()); INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile(); assertSame(barNode,children.get(0)); assertSame(fooNode,barNode.getParent()); List barDiffList=barNode.getDiffs().asList(); assertEquals(1,barDiffList.size()); FileDiff barDiff=barDiffList.get(0); assertEquals(s1.getId(),barDiff.getSnapshotId()); hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); hdfs.saveNamespace(); hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); cluster.shutdown(); cluster=new MiniDFSCluster.Builder(conf).format(false).numDataNodes(REPL).build(); cluster.waitActive(); restartClusterAndCheckImage(true); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Rename a single file across snapshottable dirs. */ @Test(timeout=60000) public void testRenameFileAcrossSnapshottableDirs() throws Exception { final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path foo=new Path(sdir2,"foo"); DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); hdfs.createSnapshot(sdir1,"s3"); final Path newfoo=new Path(sdir1,"foo"); hdfs.rename(foo,newfoo); hdfs.setReplication(newfoo,REPL_1); final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo"); assertTrue(hdfs.exists(foo_s2)); FileStatus status=hdfs.getFileStatus(foo_s2); assertEquals(REPL,status.getReplication()); final Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo"); assertFalse(hdfs.exists(foo_s3)); INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory(); Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); INodeFile sfoo=fsdir.getINode(newfoo.toString()).asFile(); assertEquals(s2.getId(),sfoo.getDiffs().getLastSnapshotId()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After the following steps:
 * <pre>
 * 1. Take snapshot s1 on /dir1 at time t1.
 * 2. Take snapshot s2 on /dir2 at time t2.
 * 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
 * 4. Take snapshot s3 on /dir1 at time t3.
 * 5. Rename /dir2/foo/ to /dir1/foo/.
 * </pre>
 * When changes happening on foo, the diff should be recorded in snapshot s2.
 */
@Test(timeout=60000) public void testRenameDirAcrossSnapshottableDirs() throws Exception {
// Setup /dir2/foo/{bar,bar2}, snapshot both dirs, then mutate foo's
// subtree (setReplication on bar2, delete bar) so foo carries diffs.
final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path foo=new Path(sdir2,"foo"); final Path bar=new Path(foo,"bar"); final Path bar2=new Path(foo,"bar2"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); hdfs.setReplication(bar2,REPL_1); hdfs.delete(bar,true); hdfs.createSnapshot(sdir1,"s3"); final Path newfoo=new Path(sdir1,"foo"); hdfs.rename(foo,newfoo);
// Pre-rename state is preserved in s2 (bar still visible there, bar2 with
// original REPL); bar2 is absent from s3 taken on dir1 before the rename.
final Path snapshotBar=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar"); assertTrue(hdfs.exists(snapshotBar)); final Path newBar2=new Path(newfoo,"bar2"); assertTrue(hdfs.exists(newBar2)); hdfs.delete(newBar2,true); final Path bar2_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar2"); assertTrue(hdfs.exists(bar2_s2)); FileStatus status=hdfs.getFileStatus(bar2_s2); assertEquals(REPL,status.getReplication()); final Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2"); assertFalse(hdfs.exists(bar2_s3)); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * After rename, delete the snapshot in src.
 *
 * <p>Renames /dir2/foo to /dir1/foo, then deletes snapshots in both trees
 * one by one, checking at each step which snapshot paths still resolve,
 * the reference counts/diff lists of the renamed dir, and the root quota
 * usage; the fsimage is round-tripped after each deletion.
 */
@Test public void testRenameDirAndDeleteSnapshot_2() throws Exception {
// Setup: /dir2/foo/bar, snapshots s1 (dir1), s2 and s3 (dir2), then rename
// foo into dir1 and round-trip the image.
final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path foo=new Path(sdir2,"foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s3"); final Path newfoo=new Path(sdir1,"foo"); hdfs.rename(foo,newfoo); restartClusterAndCheckImage(true);
// Create bar2 after the rename, snapshot s4 on dir1, delete the renamed
// dir, then delete s4 — bar/bar2 under s4 must disappear with it.
final Path bar2=new Path(newfoo,"bar2"); DFSTestUtil.createFile(hdfs,bar2,BLOCKSIZE,REPL,SEED); hdfs.createSnapshot(sdir1,"s4"); hdfs.delete(newfoo,true); final Path bar2_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar2"); assertTrue(hdfs.exists(bar2_s4)); final Path bar_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo/bar"); assertTrue(hdfs.exists(bar_s4)); hdfs.deleteSnapshot(sdir1,"s4"); restartClusterAndCheckImage(true);
// bar is only visible via dir2's snapshots (s3/s2), never via dir1's; bar2
// (created post-rename) is in no s3 view.
Path bar_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar"); assertFalse(hdfs.exists(bar_s3)); bar_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar"); assertTrue(hdfs.exists(bar_s3)); Path bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo/bar2"); assertFalse(hdfs.exists(bar2_s3)); bar2_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo/bar2"); assertFalse(hdfs.exists(bar2_s3)); hdfs.deleteSnapshot(sdir2,"s3"); final Path bar_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo/bar"); assertTrue(hdfs.exists(bar_s2));
// With only s2 left, foo resolves as a WithName reference with a single
// reference and a single diff for s2.
INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory(); Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2")); final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo"); INodeReference fooRef=fsdir.getINode(foo_s2.toString()).asReference(); assertTrue(fooRef instanceof INodeReference.WithName); INodeReference.WithCount fooWC=(WithCount)fooRef.getReferredINode(); assertEquals(1,fooWC.getReferenceCount()); INodeDirectory fooDir=fooWC.getReferredINode().asDirectory(); List diffs=fooDir.getDiffs().asList(); assertEquals(1,diffs.size()); assertEquals(s2.getId(),diffs.get(0).getSnapshotId()); restartClusterAndCheckImage(true);
// Delete the remaining snapshots and check the root's namespace usage
// shrinks accordingly (4 -> 3 items), round-tripping the image each time.
hdfs.deleteSnapshot(sdir2,"s2"); assertFalse(hdfs.exists(bar_s2)); restartClusterAndCheckImage(true); Quota.Counts q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(4,q.get(Quota.NAMESPACE)); assertEquals(0,q.get(Quota.DISKSPACE)); hdfs.deleteSnapshot(sdir1,"s1"); restartClusterAndCheckImage(true); q=fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed(); assertEquals(3,q.get(Quota.NAMESPACE)); assertEquals(0,q.get(Quota.DISKSPACE)); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testRenameWithOverWrite() throws Exception { final Path root=new Path("/"); final Path foo=new Path(root,"foo"); final Path file1InFoo=new Path(foo,"file1"); final Path file2InFoo=new Path(foo,"file2"); final Path file3InFoo=new Path(foo,"file3"); DFSTestUtil.createFile(hdfs,file1InFoo,1L,REPL,SEED); DFSTestUtil.createFile(hdfs,file2InFoo,1L,REPL,SEED); DFSTestUtil.createFile(hdfs,file3InFoo,1L,REPL,SEED); final Path bar=new Path(root,"bar"); hdfs.mkdirs(bar); SnapshotTestHelper.createSnapshot(hdfs,root,"s0"); final Path fileInBar=new Path(bar,"file1"); hdfs.rename(file1InFoo,fileInBar); final Path newDir=new Path(root,"newDir"); hdfs.rename(bar,newDir); final Path file2InNewDir=new Path(newDir,"file2"); hdfs.rename(file2InFoo,file2InNewDir); final Path file1InNewDir=new Path(newDir,"file1"); hdfs.rename(file3InFoo,file1InNewDir,Rename.OVERWRITE); SnapshotTestHelper.createSnapshot(hdfs,root,"s1"); SnapshotDiffReport report=hdfs.getSnapshotDiffReport(root,"s0","s1"); LOG.info("DiffList is \n\"" + report.toString() + "\""); List entries=report.getDiffList(); assertEquals(7,entries.size()); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null)); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,foo.getName(),null)); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,bar.getName(),null)); assertTrue(existsInDiffReport(entries,DiffType.DELETE,"foo/file1",null)); assertTrue(existsInDiffReport(entries,DiffType.RENAME,"bar","newDir")); assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file2","newDir/file2")); assertTrue(existsInDiffReport(entries,DiffType.RENAME,"foo/file3","newDir/file1")); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test renaming a file and then delete snapshots.
 *
 * <p>Moves /dir2/foo to /dir1/foo, changes its replication across
 * snapshots s4/s5, then deletes the snapshots one by one, verifying the
 * replication seen through each surviving snapshot and the file's diff
 * list; the fsimage is round-tripped between deletions.
 */
@Test public void testRenameFileAndDeleteSnapshot() throws Exception {
// Setup, rename across dirs, then vary replication around snapshots s4/s5.
final Path sdir1=new Path("/dir1"); final Path sdir2=new Path("/dir2"); hdfs.mkdirs(sdir1); hdfs.mkdirs(sdir2); final Path foo=new Path(sdir2,"foo"); DFSTestUtil.createFile(hdfs,foo,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sdir1,"s1"); SnapshotTestHelper.createSnapshot(hdfs,sdir2,"s2"); hdfs.createSnapshot(sdir1,"s3"); final Path newfoo=new Path(sdir1,"foo"); hdfs.rename(foo,newfoo); hdfs.setReplication(newfoo,REPL_1); hdfs.createSnapshot(sdir1,"s4"); hdfs.setReplication(newfoo,REPL_2); FileStatus status=hdfs.getFileStatus(newfoo); assertEquals(REPL_2,status.getReplication()); final Path foo_s4=SnapshotTestHelper.getSnapshotPath(sdir1,"s4","foo"); status=hdfs.getFileStatus(foo_s4); assertEquals(REPL_1,status.getReplication()); hdfs.createSnapshot(sdir1,"s5"); final Path foo_s5=SnapshotTestHelper.getSnapshotPath(sdir1,"s5","foo"); status=hdfs.getFileStatus(foo_s5); assertEquals(REPL_2,status.getReplication());
// Delete s5 then s4: each snapshot view disappears and the earlier views
// keep their recorded replication; s3 (pre-rename) never contained foo.
hdfs.deleteSnapshot(sdir1,"s5"); restartClusterAndCheckImage(true); assertFalse(hdfs.exists(foo_s5)); status=hdfs.getFileStatus(foo_s4); assertEquals(REPL_1,status.getReplication()); hdfs.deleteSnapshot(sdir1,"s4"); assertFalse(hdfs.exists(foo_s4)); Path foo_s3=SnapshotTestHelper.getSnapshotPath(sdir1,"s3","foo"); assertFalse(hdfs.exists(foo_s3)); foo_s3=SnapshotTestHelper.getSnapshotPath(sdir2,"s3","foo"); assertFalse(hdfs.exists(foo_s3));
// s2 still shows foo with the original replication, and the live file's
// single remaining diff is associated with s2.
final Path foo_s2=SnapshotTestHelper.getSnapshotPath(sdir2,"s2","foo"); assertTrue(hdfs.exists(foo_s2)); status=hdfs.getFileStatus(foo_s2); assertEquals(REPL,status.getReplication()); INodeFile snode=fsdir.getINode(newfoo.toString()).asFile(); assertEquals(1,snode.getDiffs().asList().size()); INodeDirectory sdir2Node=fsdir.getINode(sdir2.toString()).asDirectory(); Snapshot s2=sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(),snode.getDiffs().getLastSnapshotId()); restartClusterAndCheckImage(true);
// Remove the remaining snapshots, round-tripping the image each time.
hdfs.deleteSnapshot(sdir2,"s2"); assertFalse(hdfs.exists(foo_s2)); restartClusterAndCheckImage(true); hdfs.deleteSnapshot(sdir1,"s3"); restartClusterAndCheckImage(true); hdfs.deleteSnapshot(sdir1,"s1"); restartClusterAndCheckImage(true); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=60000) public void testRenameDirectoryInSnapshot() throws Exception { final Path sub2=new Path(sub1,"sub2"); final Path sub3=new Path(sub1,"sub3"); final Path sub2file1=new Path(sub2,"sub2file1"); final String sub1snap1="sub1snap1"; hdfs.mkdirs(sub1); hdfs.mkdirs(sub2); DFSTestUtil.createFile(hdfs,sub2file1,BLOCKSIZE,REPL,SEED); SnapshotTestHelper.createSnapshot(hdfs,sub1,sub1snap1); hdfs.rename(sub2,sub3); SnapshotDiffReport diffReport=hdfs.getSnapshotDiffReport(sub1,sub1snap1,""); LOG.info("DiffList is \n\"" + diffReport.toString() + "\""); List entries=diffReport.getDiffList(); assertEquals(2,entries.size()); assertTrue(existsInDiffReport(entries,DiffType.MODIFY,"",null)); assertTrue(existsInDiffReport(entries,DiffType.RENAME,sub2.getName(),sub3.getName())); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSetQuotaWithSnapshot

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Test clear quota of a snapshottable dir or a dir with snapshot.
 *
 * <p>Setting/clearing quota on a snapshottable directory with no snapshots
 * must not create directory diffs; once a snapshot exists, a quota change
 * adds exactly one diff, and subtree changes made after a snapshot are
 * recorded against that snapshot's diff.
 */
@Test public void testClearQuota() throws Exception {
// Quota changes on a snapshottable dir with NO snapshots must leave its
// diff list empty, regardless of the quota values used.
final Path dir=new Path("/TestSnapshot"); hdfs.mkdirs(dir); hdfs.allowSnapshot(dir); hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET,HdfsConstants.QUOTA_DONT_SET); INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertTrue(dirNode.isSnapshottable()); assertEquals(0,dirNode.getDiffs().asList().size()); hdfs.setQuota(dir,HdfsConstants.QUOTA_DONT_SET - 1,HdfsConstants.QUOTA_DONT_SET - 1); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertTrue(dirNode.isSnapshottable()); assertEquals(0,dirNode.getDiffs().asList().size()); hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertTrue(dirNode.isSnapshottable()); assertEquals(0,dirNode.getDiffs().asList().size());
// After snapshot s1 exists, clearing quota records one directory diff and
// the dir still appears in the snapshottable listing.
SnapshotTestHelper.createSnapshot(hdfs,dir,"s1"); hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertTrue(dirNode.isSnapshottable()); assertEquals(1,dirNode.getDiffs().asList().size()); SnapshottableDirectoryStatus[] status=hdfs.getSnapshottableDirListing(); assertEquals(1,status.length); assertEquals(dir,status[0].getFullPath());
// A file created under sub after snapshot s2 shows up in sub's s2 diff as
// a CREATED entry pointing at the live inode.
final Path subDir=new Path(dir,"sub"); hdfs.mkdirs(subDir); hdfs.createSnapshot(dir,"s2"); final Path file=new Path(subDir,"file"); DFSTestUtil.createFile(hdfs,file,BLOCKSIZE,REPLICATION,seed); hdfs.setQuota(dir,HdfsConstants.QUOTA_RESET,HdfsConstants.QUOTA_RESET); INode subNode=fsdir.getINode4Write(subDir.toString()); assertTrue(subNode.asDirectory().isWithSnapshot()); List diffList=subNode.asDirectory().getDiffs().asList(); assertEquals(1,diffList.size()); Snapshot s2=dirNode.getSnapshot(DFSUtil.string2Bytes("s2")); assertEquals(s2.getId(),diffList.get(0).getSnapshotId()); List createdList=diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1,createdList.size()); assertSame(fsdir.getINode4Write(file.toString()),createdList.get(0)); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshot

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * A simple test that updates a sub-directory of a snapshottable directory * with snapshots */ @Test(timeout=60000) public void testUpdateDirectory() throws Exception { Path dir=new Path("/dir"); Path sub=new Path(dir,"sub"); Path subFile=new Path(sub,"file"); DFSTestUtil.createFile(hdfs,subFile,BLOCKSIZE,REPLICATION,seed); FileStatus oldStatus=hdfs.getFileStatus(sub); hdfs.allowSnapshot(dir); hdfs.createSnapshot(dir,"s1"); hdfs.setTimes(sub,100L,100L); Path snapshotPath=SnapshotTestHelper.getSnapshotPath(dir,"s1","sub"); FileStatus snapshotStatus=hdfs.getFileStatus(snapshotPath); assertEquals(oldStatus.getModificationTime(),snapshotStatus.getModificationTime()); assertEquals(oldStatus.getAccessTime(),snapshotStatus.getAccessTime()); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
 * they are idempotent.
 *
 * <p>Also covers the root directory, which is always snapshottable:
 * allow/disallow on "/" only toggles its snapshot quota between 0 and
 * SNAPSHOT_LIMIT rather than its snapshottable flag.
 */
@Test public void testAllowAndDisallowSnapshot() throws Exception {
// A normal dir: allowSnapshot twice then disallowSnapshot twice; the
// snapshottable flag must reflect the last call and repeats are no-ops.
final Path dir=new Path("/dir"); final Path file0=new Path(dir,"file0"); final Path file1=new Path(dir,"file1"); DFSTestUtil.createFile(hdfs,file0,BLOCKSIZE,REPLICATION,seed); DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed); INodeDirectory dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertFalse(dirNode.isSnapshottable()); hdfs.allowSnapshot(dir); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertTrue(dirNode.isSnapshottable()); hdfs.allowSnapshot(dir); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertTrue(dirNode.isSnapshottable()); hdfs.disallowSnapshot(dir); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertFalse(dirNode.isSnapshottable()); hdfs.disallowSnapshot(dir); dirNode=fsdir.getINode4Write(dir.toString()).asDirectory(); assertFalse(dirNode.isSnapshottable());
// The root stays snapshottable throughout; allow/disallow only moves its
// snapshot quota between 0 and SNAPSHOT_LIMIT, idempotently.
final Path root=new Path("/"); INodeDirectory rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); assertTrue(rootNode.isSnapshottable()); assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota()); hdfs.allowSnapshot(root); rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); assertTrue(rootNode.isSnapshottable()); assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota()); hdfs.allowSnapshot(root); rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); assertTrue(rootNode.isSnapshottable()); assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota()); hdfs.disallowSnapshot(root); rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); assertTrue(rootNode.isSnapshottable()); assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota()); hdfs.disallowSnapshot(root);
rootNode=fsdir.getINode4Write(root.toString()).asDirectory(); assertTrue(rootNode.isSnapshottable()); assertEquals(0,rootNode.getDirectorySnapshottableFeature().getSnapshotQuota()); }

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotBlocksMap

InternalCallVerifier EqualityVerifier 
/** * Make sure that a delete of a non-zero-length file which results in a * zero-length file in a snapshot works. */ @Test public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception { final Path foo=new Path("/foo"); final Path bar=new Path(foo,"bar"); final byte[] testData="foo bar baz".getBytes(); DFSTestUtil.createFile(hdfs,bar,0,REPLICATION,0L); assertEquals(0,fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length); SnapshotTestHelper.createSnapshot(hdfs,foo,"s0"); FSDataOutputStream out=hdfs.append(bar); out.write(testData); out.close(); INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile(); BlockInfo[] blks=barNode.getBlocks(); assertEquals(1,blks.length); assertEquals(testData.length,blks[0].getNumBytes()); hdfs.delete(bar,true); cluster.getNameNode().getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER,false); cluster.getNameNode().getRpcServer().saveNamespace(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Make sure we delete 0-sized block when deleting an INodeFileUCWithSnapshot */ @Test public void testDeletionWithZeroSizeBlock() throws Exception { final Path foo=new Path("/foo"); final Path bar=new Path(foo,"bar"); DFSTestUtil.createFile(hdfs,bar,BLOCKSIZE,REPLICATION,0L); SnapshotTestHelper.createSnapshot(hdfs,foo,"s0"); hdfs.append(bar); INodeFile barNode=fsdir.getINode4Write(bar.toString()).asFile(); BlockInfo[] blks=barNode.getBlocks(); assertEquals(1,blks.length); assertEquals(BLOCKSIZE,blks[0].getNumBytes()); ExtendedBlock previous=new ExtendedBlock(fsn.getBlockPoolId(),blks[0]); cluster.getNameNodeRpc().addBlock(bar.toString(),hdfs.getClient().getClientName(),previous,null,barNode.getId(),null); SnapshotTestHelper.createSnapshot(hdfs,foo,"s1"); barNode=fsdir.getINode4Write(bar.toString()).asFile(); blks=barNode.getBlocks(); assertEquals(2,blks.length); assertEquals(BLOCKSIZE,blks[0].getNumBytes()); assertEquals(0,blks[1].getNumBytes()); hdfs.delete(bar,true); final Path sbar=SnapshotTestHelper.getSnapshotPath(foo,"s1",bar.getName()); barNode=fsdir.getINode(sbar.toString()).asFile(); blks=barNode.getBlocks(); assertEquals(1,blks.length); assertEquals(BLOCKSIZE,blks[0].getNumBytes()); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * 1. rename under-construction file with 0-sized blocks after snapshot.
 * 2. delete the renamed directory.
 * make sure we delete the 0-sized block.
 * see HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
  // Reopen for append and add a second, still empty block via RPC so the
  // file is under construction with a trailing 0-sized block.
  hdfs.append(bar);
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfo[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc().addBlock(bar.toString(),
      hdfs.getClient().getClientName(), previous, null, barNode.getId(), null);

  // Snapshot first, then rename the under-construction file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // Delete the whole parent dir of the renamed file; the snapshot copy
  // (under the original name) must only keep the first, full block —
  // i.e. the 0-sized block has been cleaned up.
  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Make sure we delete 0-sized block when deleting an under-construction file.
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
  // Reopen for append and allocate an additional, still 0-sized block
  // via the NameNode RPC (no data is ever written to it).
  hdfs.append(bar);
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfo[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc().addBlock(bar.toString(),
      hdfs.getClient().getClientName(), previous, null, barNode.getId(), null);

  // Snapshot the directory while the file is under construction.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // Delete the parent dir; the snapshot copy of the file must keep only
  // the first (full) block, proving the 0-sized block was removed.
  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotDeletion

InternalCallVerifier EqualityVerifier 
/**
 * Test deleting snapshots with modification on the metadata of directory.
 */
@Test(timeout=300000)
public void testDeleteSnapshotWithDirModification() throws Exception {
  Path subFile = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);

  // s1 captures owner user1/group1.
  hdfs.setOwner(sub, "user1", "group1");
  SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
  checkQuotaUsageComputation(sub, 3, BLOCKSIZE * 3);

  // s2 captures owner user2/group2.
  hdfs.setOwner(sub, "user2", "group2");
  checkQuotaUsageComputation(sub, 3, BLOCKSIZE * 3);
  hdfs.createSnapshot(sub, "s2");
  checkQuotaUsageComputation(sub, 4, BLOCKSIZE * 3);

  // s3 is taken with no metadata change since s2; then the owner is changed
  // again in the current state.
  hdfs.createSnapshot(sub, "s3");
  checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 3);
  hdfs.setOwner(sub, "user3", "group3");
  checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 3);

  // Dropping s3 must leave s2's view intact: owner still user2/group2.
  hdfs.deleteSnapshot(sub, "s3");
  checkQuotaUsageComputation(sub, 4, BLOCKSIZE * 3);
  FileStatus s2Status = hdfs.getFileStatus(
      new Path(sub, HdfsConstants.DOT_SNAPSHOT_DIR + "/s2"));
  assertEquals("user2", s2Status.getOwner());
  assertEquals("group2", s2Status.getGroup());

  // Dropping s2 must likewise leave s1's view intact: owner user1/group1.
  hdfs.deleteSnapshot(sub, "s2");
  checkQuotaUsageComputation(sub, 3, BLOCKSIZE * 3);
  FileStatus s1Status = hdfs.getFileStatus(
      new Path(sub, HdfsConstants.DOT_SNAPSHOT_DIR + "/s1"));
  assertEquals("user1", s1Status.getOwner());
  assertEquals("group1", s1Status.getGroup());
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test deleting the earliest (first) snapshot. In this simplest scenario, the
 * snapshots are taken on the same directory, and we do not need to combine
 * snapshot diffs.
 */
@Test(timeout=300000)
public void testDeleteEarliestSnapshot1() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  String snapshotName = "s1";

  // Deleting a snapshot on a non-snapshottable dir must fail.
  try {
    hdfs.deleteSnapshot(sub, snapshotName);
    fail("SnapshotException expected: " + sub.toString()
        + " is not snapshottable yet");
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains(
        "Directory is not a snapshottable directory: " + sub, e);
  }

  hdfs.allowSnapshot(sub);
  // Deleting a snapshot that does not exist must fail.
  try {
    hdfs.deleteSnapshot(sub, snapshotName);
    fail("SnapshotException expected: snapshot " + snapshotName
        + " does not exist for " + sub.toString());
  } catch (Exception e) {
    GenericTestUtils.assertExceptionContains("Cannot delete snapshot "
        + snapshotName + " from path " + sub.toString()
        + ": the snapshot does not exist.", e);
  }

  // Create and immediately delete the snapshot; quota usage is checked
  // after every step.
  SnapshotTestHelper.createSnapshot(hdfs, sub, snapshotName);
  checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 2);
  hdfs.deleteSnapshot(sub, snapshotName);
  checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);

  // Recreate s1, add a file, take s2 on top of it.
  hdfs.createSnapshot(sub, snapshotName);
  checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 2);
  Path newFile = new Path(sub, "newFile");
  DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
  String snapshotName2 = "s2";
  hdfs.createSnapshot(sub, snapshotName2);
  checkQuotaUsageComputation(sub, 6, BLOCKSIZE * REPLICATION * 3);

  // Deleting the earliest snapshot (s1) must not change what s2 sees.
  Path ss = SnapshotTestHelper.getSnapshotPath(sub, snapshotName2, "newFile");
  FileStatus statusBeforeDeletion = hdfs.getFileStatus(ss);
  hdfs.deleteSnapshot(sub, snapshotName);
  checkQuotaUsageComputation(sub, 5, BLOCKSIZE * REPLICATION * 3);
  FileStatus statusAfterDeletion = hdfs.getFileStatus(ss);
  System.out.println("Before deletion: " + statusBeforeDeletion.toString()
      + "\n" + "After deletion: " + statusAfterDeletion.toString());
  assertEquals(statusBeforeDeletion.toString(),
      statusAfterDeletion.toString());
}

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test deleting the earliest (first) snapshot. In this more complicated
 * scenario, the snapshots are taken across directories.
 *
 * The test covers the following scenarios:
 * 1. delete the first diff in the diff list of a directory
 * 2. delete the first diff in the diff list of a file
 *
 * Also, the recursive cleanTree process should cover both INodeFile and
 * INodeDirectory.
 */
@Test(timeout=300000)
public void testDeleteEarliestSnapshot2() throws Exception {
  Path noChangeDir = new Path(sub, "noChangeDir");
  Path noChangeFile = new Path(noChangeDir, "noChangeFile");
  Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
  Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
  Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
  DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);

  // Remember the blocks of the file that will be deleted so we can verify
  // they are released from the blocks map later.
  final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
      .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
  BlockInfo[] blocks = toDeleteFileNode.getBlocks();

  // Take s0, then modify the tree: delete one file and change metadata of
  // a file (replication) and a dir (owner/group).
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  checkQuotaUsageComputation(dir, 8, 3 * BLOCKSIZE * REPLICATION);
  hdfs.delete(toDeleteFile, true);
  checkQuotaUsageComputation(dir, 10, 3 * BLOCKSIZE * REPLICATION);
  hdfs.setReplication(metaChangeFile, REPLICATION_1);
  hdfs.setOwner(metaChangeDir, "unknown", "unknown");
  checkQuotaUsageComputation(dir, 11, 3 * BLOCKSIZE * REPLICATION);
  hdfs.createSnapshot(dir, "s1");
  checkQuotaUsageComputation(dir, 12, 3 * BLOCKSIZE * REPLICATION);

  // Delete the earliest snapshot; toDeleteFile was only referenced by s0,
  // so its blocks must now be gone from the blocks map.
  hdfs.deleteSnapshot(dir, "s0");
  checkQuotaUsageComputation(dir, 7, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
  for (BlockInfo b : blocks) {
    assertNull(blockmanager.getBlockCollection(b));
  }

  // s0 must be gone; only the s1 diff remains in the dir's diff list.
  final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
  Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertNull(snapshot0);
  Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
  DirectoryDiffList diffList = dirNode.getDiffs();
  assertEquals(1, diffList.asList().size());
  assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
  diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
  assertEquals(0, diffList.asList().size());

  // Nodes untouched between snapshots stay plain INodeDirectory/INodeFile.
  final INodeDirectory noChangeDirNode =
      (INodeDirectory) fsdir.getINode(noChangeDir.toString());
  assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
  final INodeFile noChangeFileNode =
      (INodeFile) fsdir.getINode(noChangeFile.toString());
  assertEquals(INodeFile.class, noChangeFileNode.getClass());
  TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1,
      fsdir, blockmanager);

  // The metadata changes made after s0 must still be visible.
  FileStatus status = hdfs.getFileStatus(metaChangeDir);
  assertEquals("unknown", status.getOwner());
  assertEquals("unknown", status.getGroup());
  status = hdfs.getFileStatus(metaChangeFile);
  assertEquals(REPLICATION_1, status.getReplication());
  TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1,
      fsdir, blockmanager);

  // toDeleteFile is gone from both the current tree and the (now deleted) s0.
  try {
    status = hdfs.getFileStatus(toDeleteFile);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains(
        "File does not exist: " + toDeleteFile.toString(), e);
  }
  final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir,
      "s0", toDeleteFile.toString().substring(dir.toString().length()));
  try {
    status = hdfs.getFileStatus(toDeleteFileInSnapshot);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains(
        "File does not exist: " + toDeleteFileInSnapshot.toString(), e);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Test applying editlog of operation which deletes a snapshottable directory
 * without snapshots. The snapshottable dir list in snapshot manager should be
 * updated.
 */
@Test(timeout=300000)
public void testApplyEditLogForDeletion() throws Exception {
  final Path root = new Path("/foo");
  final Path snapDir1 = new Path(root, "bar1");
  final Path snapDir2 = new Path(root, "bar2");
  hdfs.mkdirs(snapDir1);
  hdfs.mkdirs(snapDir2);

  // Register two snapshottable dirs (no snapshots are ever taken).
  hdfs.allowSnapshot(snapDir1);
  hdfs.allowSnapshot(snapDir2);
  assertEquals(2,
      cluster.getNamesystem().getSnapshotManager().getNumSnapshottableDirs());
  assertEquals(2, cluster.getNamesystem().getSnapshotManager()
      .getSnapshottableDirs().length);

  // Delete their parent, then replay the edit log via a NameNode restart;
  // the snapshottable-dir list must come back empty.
  hdfs.delete(root, true);
  cluster.restartNameNode(0);
  assertEquals(0,
      cluster.getNamesystem().getSnapshotManager().getNumSnapshottableDirs());
  assertEquals(0, cluster.getNamesystem().getSnapshotManager()
      .getSnapshottableDirs().length);

  // Also make sure the state survives a checkpoint + restart from fsimage.
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  cluster.restartNameNode(0);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test deleting a directory which is a descendant of a snapshottable
 * directory. In the test we need to cover the following cases:
 *
 * 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
 * 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
 *    on ancestor(s).
 * 3. Delete current INodeFileWithSnapshot.
 * 4. Delete current INodeDirectoryWithSnapshot.
 */
@Test(timeout=300000)
public void testDeleteCurrentFileDirectory() throws Exception {
  // Create a dir/file that will be deleted before any snapshot is taken.
  Path deleteDir = new Path(subsub, "deleteDir");
  Path deleteFile = new Path(deleteDir, "deleteFile");
  // Create a dir/file that will never change.
  Path noChangeDirParent = new Path(sub, "noChangeDirParent");
  Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
  Path noChangeFile = new Path(noChangeDir, "noChangeFile");
  DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
  // Two files whose metadata (replication) will change after s0.
  Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
  DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
  Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
  DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);

  // Case 1: delete without taking any snapshot, then take s0.
  hdfs.delete(deleteDir, true);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");

  // Case 2: create and delete a dir/file after s0; its blocks must be
  // released since no snapshot references them.
  Path tempDir = new Path(dir, "tempdir");
  Path tempFile = new Path(tempDir, "tempfile");
  DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
  final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
      tempFile.toString(), 1, fsdir, blockmanager);
  BlockInfo[] blocks = temp.getBlocks();
  hdfs.delete(tempDir, true);
  checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 3);
  for (BlockInfo b : blocks) {
    assertNull(blockmanager.getBlockCollection(b));
  }

  // Modify the tree after s0, take s1, then delete noChangeDirParent.
  Path newFileAfterS0 = new Path(subsub, "newFile");
  DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
  hdfs.setReplication(metaChangeFile1, REPLICATION_1);
  hdfs.setReplication(metaChangeFile2, REPLICATION_1);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
  Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory()
      .getSnapshot(DFSUtil.string2Bytes("s0"));
  Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory()
      .getSnapshot(DFSUtil.string2Bytes("s1"));
  hdfs.delete(noChangeDirParent, true);
  checkQuotaUsageComputation(dir, 17L, BLOCKSIZE * REPLICATION * 4);

  // The deleted subtree is still reachable through snapshot s1.
  Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1",
      sub.getName() + "/" + noChangeDirParent.getName() + "/"
          + noChangeDir.getName());
  INodeDirectory snapshotNode =
      (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
  assertEquals(INodeDirectory.class, snapshotNode.getClass());
  // NOTE: restored the stripped generic argument <INode> here; the raw type
  // would not compile since get(i) must yield INode.
  ReadOnlyList<INode> children =
      snapshotNode.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(2, children.size());
  INode noChangeFileSCopy = children.get(1);
  assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
  assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
  TestSnapshotBlocksMap.assertBlockCollection(
      new Path(snapshotNoChangeDir, noChangeFileSCopy.getLocalName())
          .toString(), 1, fsdir, blockmanager);

  // metaChangeFile2's snapshot copy records the replication per snapshot:
  // s0 sees the original REPLICATION, s1 and the current state see
  // REPLICATION_1.
  INodeFile metaChangeFile2SCopy = children.get(0).asFile();
  assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
  assertTrue(metaChangeFile2SCopy.isWithSnapshot());
  assertFalse(metaChangeFile2SCopy.isUnderConstruction());
  TestSnapshotBlocksMap.assertBlockCollection(
      new Path(snapshotNoChangeDir, metaChangeFile2SCopy.getLocalName())
          .toString(), 1, fsdir, blockmanager);
  assertEquals(REPLICATION_1,
      metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
  assertEquals(REPLICATION_1,
      metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
  assertEquals(REPLICATION,
      metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));

  // Create a new file under sub, then delete the whole sub. The new file's
  // blocks must be released since no snapshot ever referenced it.
  Path newFile = new Path(sub, "newFile");
  DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
  final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(
      newFile.toString(), 1, fsdir, blockmanager);
  blocks = newFileNode.getBlocks();
  checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
  hdfs.delete(sub, true);
  checkQuotaUsageComputation(dir, 19L, BLOCKSIZE * REPLICATION * 4);
  for (BlockInfo b : blocks) {
    assertNull(blockmanager.getBlockCollection(b));
  }

  // sub is still reachable through s1; its children differ per snapshot id.
  Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
      sub.getName());
  INodeDirectory snapshotNode4Sub =
      fsdir.getINode(snapshotSub.toString()).asDirectory();
  assertTrue(snapshotNode4Sub.isWithSnapshot());
  assertEquals(1,
      snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
  assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
  INode snapshotNode4Subsub =
      snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID).get(0);
  assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
  assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());

  // subsub's child list: two entries in the current state, only one
  // (metaChangeFile1) as seen from s0.
  INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
  children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
  assertEquals(2, children.size());
  assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
  assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
  children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
  assertEquals(1, children.size());
  INode child = children.get(0);
  assertEquals(child.getLocalName(), metaChangeFile1.getName());

  // metaChangeFile1's per-snapshot replication mirrors metaChangeFile2's.
  INodeFile metaChangeFile1SCopy = child.asFile();
  assertTrue(metaChangeFile1SCopy.isWithSnapshot());
  assertFalse(metaChangeFile1SCopy.isUnderConstruction());
  assertEquals(REPLICATION_1,
      metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
  assertEquals(REPLICATION_1,
      metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
  assertEquals(REPLICATION,
      metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A test covering the case where the snapshot diff to be deleted is renamed
 * to its previous snapshot.
 */
@Test(timeout=300000)
public void testRenameSnapshotDiff() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path subFile0 = new Path(sub, "file0");
  final Path subsubFile0 = new Path(subsub, "file0");
  DFSTestUtil.createFile(hdfs, subFile0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subsubFile0, BLOCKSIZE, REPLICATION, seed);
  hdfs.setOwner(subsub, "owner", "group");

  // s0 on sub captures the owner/group set above.
  SnapshotTestHelper.createSnapshot(hdfs, sub, "s0");
  checkQuotaUsageComputation(sub, 5, BLOCKSIZE * 6);

  // Add more files, then take s1 on sub and (nested) s2 on dir.
  final Path subFile1 = new Path(sub, "file1");
  final Path subsubFile1 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subFile1, BLOCKSIZE, REPLICATION_1, seed);
  DFSTestUtil.createFile(hdfs, subsubFile1, BLOCKSIZE, REPLICATION, seed);
  checkQuotaUsageComputation(sub, 8, BLOCKSIZE * 11);
  SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
  checkQuotaUsageComputation(sub, 9, BLOCKSIZE * 11);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
  checkQuotaUsageComputation(dir, 11, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 9, BLOCKSIZE * 11);

  // Modify metadata and delete a file after s2.
  hdfs.setOwner(subsub, "unknown", "unknown");
  hdfs.setReplication(subsubFile1, REPLICATION_1);
  checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 11, BLOCKSIZE * 11);
  hdfs.delete(subFile1, true);
  checkQuotaUsageComputation(new Path("/"), 16, BLOCKSIZE * 11);
  checkQuotaUsageComputation(dir, 15, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 13, BLOCKSIZE * 11);

  // Snapshot s2 must still show the pre-modification state.
  Path subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
      sub.getName() + Path.SEPARATOR + subsub.getName());
  Path subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
      sub.getName() + Path.SEPARATOR + subsub.getName() + Path.SEPARATOR
          + subsubFile1.getName());
  Path subFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
      sub.getName() + Path.SEPARATOR + subFile1.getName());
  FileStatus subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
  assertEquals("owner", subsubStatus.getOwner());
  assertEquals("group", subsubStatus.getGroup());
  FileStatus subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
  assertEquals(REPLICATION, subsubFile1Status.getReplication());
  FileStatus subFile1Status = hdfs.getFileStatus(subFile1SCopy);
  assertEquals(REPLICATION_1, subFile1Status.getReplication());

  // Delete s2; its diff is merged into the previous snapshot. All the s2
  // paths must now be unreachable ...
  hdfs.deleteSnapshot(dir, "s2");
  checkQuotaUsageComputation(new Path("/"), 14, BLOCKSIZE * 11);
  checkQuotaUsageComputation(dir, 13, BLOCKSIZE * 11);
  checkQuotaUsageComputation(sub, 12, BLOCKSIZE * 11);
  try {
    hdfs.getFileStatus(subsubSnapshotCopy);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains(
        "File does not exist: " + subsubSnapshotCopy.toString(), e);
  }
  try {
    hdfs.getFileStatus(subsubFile1SCopy);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains(
        "File does not exist: " + subsubFile1SCopy.toString(), e);
  }
  try {
    hdfs.getFileStatus(subFile1SCopy);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains(
        "File does not exist: " + subFile1SCopy.toString(), e);
  }

  // ... but the same state must still be visible through s1 on sub.
  subsubSnapshotCopy =
      SnapshotTestHelper.getSnapshotPath(sub, "s1", subsub.getName());
  subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1",
      subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
  subFile1SCopy =
      SnapshotTestHelper.getSnapshotPath(sub, "s1", subFile1.getName());
  subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
  assertEquals("owner", subsubStatus.getOwner());
  assertEquals("group", subsubStatus.getGroup());
  subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
  assertEquals(REPLICATION, subsubFile1Status.getReplication());
  subFile1Status = hdfs.getFileStatus(subFile1SCopy);
  assertEquals(REPLICATION_1, subFile1Status.getReplication());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotDiffReport

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test the computation and representation of diff between snapshots.
 */
@Test(timeout=60000)
public void testDiffReport() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  Path subsub1 = new Path(sub1, "subsub1");
  Path subsubsub1 = new Path(subsub1, "subsubsub1");
  hdfs.mkdirs(subsubsub1);
  // Helper creates/modifies files under the given paths and takes snapshots
  // (s0..s5 are referenced below) on the first argument.
  modifyAndCreateSnapshot(sub1, new Path[]{sub1, subsubsub1});
  modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1, subsubsub1});

  // Diff report on a non-snapshottable dir must fail.
  try {
    hdfs.getSnapshotDiffReport(subsub1, "s1", "s2");
    fail("Expect exception when getting snapshot diff report: " + subsub1
        + " is not a snapshottable directory.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Directory is not a snapshottable directory: " + subsub1, e);
  }

  // Diff report with a bogus snapshot name must fail.
  final String invalidName = "invalid";
  try {
    hdfs.getSnapshotDiffReport(sub1, invalidName, invalidName);
    fail("Expect exception when providing invalid snapshot name for diff report");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Cannot find the snapshot of directory " + sub1 + " with name "
            + invalidName, e);
  }

  // Identical endpoints (same snapshot, or both empty meaning the current
  // state) must yield an empty diff.
  SnapshotDiffReport report = hdfs.getSnapshotDiffReport(sub1, "s0", "s0");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(sub1, "", "");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(subsubsub1, "s0", "s2");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());
  report = hdfs.getSnapshotDiffReport(hdfs.makeQualified(subsubsub1), "s0", "s2");
  System.out.println(report);
  assertEquals(0, report.getDiffList().size());

  // s0 -> s2: changes directly under sub1 only.
  verifyDiffReport(sub1, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));

  // s0 -> s5: includes the nested subsub1/subsubsub1 changes.
  verifyDiffReport(sub1, "s0", "s5",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));

  // s2 -> s5: only the later nested changes remain.
  verifyDiffReport(sub1, "s2", "s5",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));

  // s3 -> current state ("" as the later endpoint).
  verifyDiffReport(sub1, "s3", "",
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
      new DiffReportEntry(DiffType.MODIFY,
          DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
      new DiffReportEntry(DiffType.CREATE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotFileLength

APIUtilityVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Adding as part of jira HDFS-5343.
 * Test for checking the cat command on snapshot path: it
 * cannot read a file beyond snapshot file length.
 * @throws Exception
 */
@Test(timeout=600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {
  FSDataInputStream fis = null;
  FileStatus fileStatus = null;
  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];

  // Create a one-block file, snapshot it, then append a second block so the
  // live file (2 blocks) is longer than the snapshot copy (1 block).
  hdfs.mkdirs(sub);
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);
  DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);

  // Reading the live file returns all 2 blocks.
  fileStatus = hdfs.getFileStatus(file1);
  assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
  fis = hdfs.open(file1);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
  fis.close();

  // Reading through the snapshot path is capped at the snapshot length.
  Path file1snap1 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertEquals(fileStatus.getLen(), BLOCKSIZE);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
  fis.close();

  // Run the shell "-cat" on the snapshot path; capture stdout/stderr into a
  // buffer so the number of emitted bytes can be measured, restoring the
  // original streams afterwards.
  PrintStream outBackup = System.out;
  PrintStream errBackup = System.err;
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));
  FsShell shell = new FsShell();
  try {
    ToolRunner.run(conf, shell, new String[]{"-cat",
        "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1"});
    assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
  } finally {
    System.setOut(outBackup);
    System.setErr(errBackup);
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotListing

IterativeVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Test listing snapshots under a snapshottable directory.
 */
@Test(timeout=15000)
public void testListSnapshots() throws Exception {
  final Path snapshotsPath = new Path(dir, ".snapshot");
  FileStatus[] listing = null;

  // Listing "/.snapshot" on a plain (non-snapshottable) root is empty.
  listing = hdfs.listStatus(new Path("/.snapshot"));
  assertEquals(0, listing.length);

  // Listing the .snapshot dir of a non-snapshottable directory fails.
  try {
    listing = hdfs.listStatus(snapshotsPath);
    fail("expect SnapshotException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        "Directory is not a snapshottable directory: " + dir.toString(), e);
  }

  // Once snapshottable, the listing exists but is empty.
  hdfs.allowSnapshot(dir);
  listing = hdfs.listStatus(snapshotsPath);
  assertEquals(0, listing.length);

  // Grow the snapshot list one at a time, re-checking the full listing
  // (name and order) after every creation.
  final int snapshotNum = 5;
  for (int created = 0; created < snapshotNum; created++) {
    hdfs.createSnapshot(dir, "s_" + created);
    listing = hdfs.listStatus(snapshotsPath);
    assertEquals(created + 1, listing.length);
    for (int idx = 0; idx <= created; idx++) {
      assertEquals("s_" + idx, listing[idx].getPath().getName());
    }
  }

  // Shrink it back down from the tail, re-checking after every deletion.
  for (int remaining = snapshotNum - 1; remaining > 0; remaining--) {
    hdfs.deleteSnapshot(dir, "s_" + remaining);
    listing = hdfs.listStatus(snapshotsPath);
    assertEquals(remaining, listing.length);
    for (int idx = 0; idx < remaining; idx++) {
      assertEquals("s_" + idx, listing[idx].getPath().getName());
    }
  }

  // Removing the last snapshot leaves an empty listing.
  hdfs.deleteSnapshot(dir, "s_0");
  listing = hdfs.listStatus(snapshotsPath);
  assertEquals(0, listing.length);
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotMetrics

InternalCallVerifier EqualityVerifier 
/**
 * Test the metric SnapshottableDirectories, AllowSnapshotOps,
 * DisallowSnapshotOps, and listSnapshottableDirOps.
 */
@Test
public void testSnapshottableDirs() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  // Baseline: no snapshottable dirs, no allow/disallow ops recorded yet.
  assertGauge("SnapshottableDirectories", 0, getMetrics(NS_METRICS));
  assertCounter("AllowSnapshotOps", 0L, getMetrics(NN_METRICS));
  assertCounter("DisallowSnapshotOps", 0L, getMetrics(NN_METRICS));

  hdfs.allowSnapshot(sub1);
  assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
  assertCounter("AllowSnapshotOps", 1L, getMetrics(NN_METRICS));

  Path sub2 = new Path(dir, "sub2");
  Path file = new Path(sub2, "file");
  DFSTestUtil.createFile(hdfs, file, 1024, REPLICATION, seed);
  hdfs.allowSnapshot(sub2);
  assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
  assertCounter("AllowSnapshotOps", 2L, getMetrics(NN_METRICS));

  // A nested snapshottable dir (allowed via setAllowNestedSnapshots above).
  Path subsub1 = new Path(sub1, "sub1sub1");
  Path subfile = new Path(subsub1, "file");
  DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
  hdfs.allowSnapshot(subsub1);
  assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
  assertCounter("AllowSnapshotOps", 3L, getMetrics(NN_METRICS));

  // Re-allowing an already snapshottable dir bumps the op counter but not
  // the gauge.
  hdfs.allowSnapshot(sub1);
  assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
  assertCounter("AllowSnapshotOps", 4L, getMetrics(NN_METRICS));

  hdfs.disallowSnapshot(sub1);
  assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
  assertCounter("DisallowSnapshotOps", 1L, getMetrics(NN_METRICS));

  // Deleting a snapshottable dir also decrements the gauge.
  hdfs.delete(subsub1, true);
  assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));

  SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
  assertEquals(1, status.length);
  assertCounter("ListSnapshottableDirOps", 1L, getMetrics(NN_METRICS));
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotRename

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test FileStatus of snapshot file before/after rename.
 */
@Test(timeout=60000)
public void testSnapshotRename() throws Exception {
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  // Snapshot the dir and capture the file's status under the old name.
  Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
  Path snapshottedFile = new Path(root, file1.getName());
  assertTrue(hdfs.exists(snapshottedFile));
  FileStatus beforeStatus = hdfs.getFileStatus(snapshottedFile);

  // After renaming s1 -> s2 the old snapshot path must be gone and the new
  // one must resolve.
  hdfs.renameSnapshot(sub1, "s1", "s2");
  assertFalse(hdfs.exists(snapshottedFile));
  root = SnapshotTestHelper.getSnapshotRoot(sub1, "s2");
  snapshottedFile = new Path(root, file1.getName());
  assertTrue(hdfs.exists(snapshottedFile));
  FileStatus afterStatus = hdfs.getFileStatus(snapshottedFile);

  // The two statuses differ only in the path component: after substituting
  // the path, everything else must match.
  assertFalse(beforeStatus.equals(afterStatus));
  beforeStatus.setPath(afterStatus.getPath());
  assertEquals(beforeStatus.toString(), afterStatus.toString());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotReplication

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test replication number calculation for a file with snapshots.
 */
@Test(timeout=60000)
public void testReplicationWithSnapshot() throws Exception {
  short fileRep = 1;
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, fileRep, seed);
  // Snapshot path -> the replication recorded when that snapshot was taken.
  // (Generic type arguments restored; the original raw Map/HashMap usage
  // triggered unchecked warnings.)
  Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();

  // Take a snapshot at each replication level 1..NUMDATANODE-1, bumping the
  // live file's replication after each snapshot.
  while (fileRep < NUMDATANODE) {
    Path snapshotRoot =
        SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + fileRep);
    Path snapshot = new Path(snapshotRoot, file1.getName());
    // The snapshot copy records the replication at snapshot time.
    assertEquals(fileRep, getINodeFile(snapshot).getFileReplication());
    snapshotRepMap.put(snapshot, fileRep);
    // Increase the live file's replication and re-check both the live file
    // and every snapshot copy.
    hdfs.setReplication(file1, ++fileRep);
    checkFileReplication(file1, fileRep, fileRep);
    checkSnapshotFileReplication(file1, snapshotRepMap, fileRep);
  }

  // Lowering the live replication back to REPLICATION: the expected block
  // replication stays at the historical maximum (NUMDATANODE - 1).
  hdfs.setReplication(file1, REPLICATION);
  checkFileReplication(file1, REPLICATION, (short) (NUMDATANODE - 1));
  checkSnapshotFileReplication(file1, snapshotRepMap,
      (short) (NUMDATANODE - 1));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test replication for a file with snapshots, including the scenario where
 * the original file is deleted: the snapshot copies must retain both their
 * block replication and their recorded file replication.
 */
@Test(timeout=60000) public void testReplicationAfterDeletion() throws Exception {
  DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
  Map snapshotRepMap=new HashMap();
  // Take three snapshots s1..s3, all while the file has REPLICATION replicas.
  for (int i=1; i <= 3; i++) {
    Path root=SnapshotTestHelper.createSnapshot(hdfs,sub1,"s" + i);
    Path ssFile=new Path(root,file1.getName());
    snapshotRepMap.put(ssFile,REPLICATION);
  }
  checkFileReplication(file1,REPLICATION,REPLICATION);
  checkSnapshotFileReplication(file1,snapshotRepMap,REPLICATION);
  // Delete the live file; the snapshot inodes must keep their replication.
  hdfs.delete(file1,true);
  for ( Path ss : snapshotRepMap.keySet()) {
    final INodeFile ssInode=getINodeFile(ss);
    assertEquals(REPLICATION,ssInode.getBlockReplication());
    assertEquals(snapshotRepMap.get(ss).shortValue(),ssInode.getFileReplication());
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshotStatsMXBean

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test getting SnapshotStatsMXBean information: the counts and entries
 * exposed over JMX must match the NameNode's SnapshotManager state after
 * creating one snapshottable directory with one snapshot.
 */
@Test public void testSnapshotStatsMXBeanInfo() throws Exception {
  Configuration conf=new Configuration();
  MiniDFSCluster cluster=null;
  String pathName="/snapshot";
  Path path=new Path(pathName);
  try {
    cluster=new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    SnapshotManager sm=cluster.getNamesystem().getSnapshotManager();
    DistributedFileSystem dfs=(DistributedFileSystem)cluster.getFileSystem();
    // Set up exactly one snapshottable dir with one snapshot.
    dfs.mkdirs(path);
    dfs.allowSnapshot(path);
    dfs.createSnapshot(path);
    // Read the SnapshotInfo MXBean attributes through the platform MBean server.
    MBeanServer mbs=ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName=new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
    CompositeData[] directories=(CompositeData[])mbs.getAttribute(mxbeanName,"SnapshottableDirectories");
    int numDirectories=Array.getLength(directories);
    assertEquals(sm.getNumSnapshottableDirs(),numDirectories);
    CompositeData[] snapshots=(CompositeData[])mbs.getAttribute(mxbeanName,"Snapshots");
    int numSnapshots=Array.getLength(snapshots);
    assertEquals(sm.getNumSnapshots(),numSnapshots);
    // Spot-check that the single entry of each attribute references our path.
    CompositeData d=(CompositeData)Array.get(directories,0);
    CompositeData s=(CompositeData)Array.get(snapshots,0);
    assertTrue(((String)d.get("path")).contains(pathName));
    assertTrue(((String)s.get("snapshotDirectory")).contains(pathName));
  }
 finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestSnapshottableDirListing

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test listing all the snapshottable directories as allow/disallow, rename,
 * snapshot creation, and deletion change the set. getSnapshottableDirListing
 * returns null when nothing is snapshottable.
 */
@Test(timeout=60000) public void testListSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  // Initially nothing is snapshottable -> null listing.
  SnapshottableDirectoryStatus[] dirs=hdfs.getSnapshottableDirListing();
  assertNull(dirs);
  // Allow snapshots on root: listed with an empty local name.
  final Path root=new Path("/");
  hdfs.allowSnapshot(root);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(1,dirs.length);
  assertEquals("",dirs[0].getDirStatus().getLocalName());
  assertEquals(root,dirs[0].getFullPath());
  // Disallow on root: back to null.
  hdfs.disallowSnapshot(root);
  dirs=hdfs.getSnapshottableDirListing();
  assertNull(dirs);
  // Allow dir1; it starts with zero snapshots.
  hdfs.allowSnapshot(dir1);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(1,dirs.length);
  assertEquals(dir1.getName(),dirs[0].getDirStatus().getLocalName());
  assertEquals(dir1,dirs[0].getFullPath());
  assertEquals(0,dirs[0].getSnapshotNumber());
  // Allow dir2 as well; listing contains both in path order.
  hdfs.allowSnapshot(dir2);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(2,dirs.length);
  assertEquals(dir1.getName(),dirs[0].getDirStatus().getLocalName());
  assertEquals(dir1,dirs[0].getFullPath());
  assertEquals(dir2.getName(),dirs[1].getDirStatus().getLocalName());
  assertEquals(dir2,dirs[1].getFullPath());
  assertEquals(0,dirs[1].getSnapshotNumber());
  // Overwrite-rename dir3 onto dir2: dir2 loses its snapshottable status.
  final Path dir3=new Path("/TestSnapshot3");
  hdfs.mkdirs(dir3);
  hdfs.rename(dir3,dir2,Rename.OVERWRITE);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(1,dirs.length);
  assertEquals(dir1,dirs[0].getFullPath());
  // Re-allow dir2 and create two snapshots; the count is reported.
  hdfs.allowSnapshot(dir2);
  hdfs.createSnapshot(dir2,"s1");
  hdfs.createSnapshot(dir2,"s2");
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(dir2,dirs[1].getFullPath());
  assertEquals(2,dirs[1].getSnapshotNumber());
  // Nested snapshottable dirs under dir1 (allowed by setAllowNestedSnapshots).
  Path sub1=new Path(dir1,"sub1");
  Path file1=new Path(sub1,"file1");
  Path sub2=new Path(dir1,"sub2");
  Path file2=new Path(sub2,"file2");
  DFSTestUtil.createFile(hdfs,file1,BLOCKSIZE,REPLICATION,seed);
  DFSTestUtil.createFile(hdfs,file2,BLOCKSIZE,REPLICATION,seed);
  hdfs.allowSnapshot(sub1);
  hdfs.allowSnapshot(sub2);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(4,dirs.length);
  assertEquals(dir1,dirs[0].getFullPath());
  assertEquals(dir2,dirs[1].getFullPath());
  assertEquals(sub1,dirs[2].getFullPath());
  assertEquals(sub2,dirs[3].getFullPath());
  // Disallowing sub1 removes only that entry.
  hdfs.disallowSnapshot(sub1);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(3,dirs.length);
  assertEquals(dir1,dirs[0].getFullPath());
  assertEquals(dir2,dirs[1].getFullPath());
  assertEquals(sub2,dirs[2].getFullPath());
  // Deleting dir1 recursively removes dir1 and the nested sub2 entry.
  hdfs.delete(dir1,true);
  dirs=hdfs.getSnapshottableDirListing();
  assertEquals(1,dirs.length);
  assertEquals(dir2.getName(),dirs[0].getDirStatus().getLocalName());
  assertEquals(dir2,dirs[0].getFullPath());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test the listing with different user names to make sure only directories
 * that are owned by the calling user are listed, while a superuser sees all
 * snapshottable directories.
 */
@Test(timeout=60000) public void testListWithDifferentUser() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
  // Two dirs owned by the default (test) user.
  hdfs.allowSnapshot(dir1);
  hdfs.allowSnapshot(dir2);
  hdfs.setPermission(root,FsPermission.valueOf("-rwxrwxrwx"));
  // user1 creates and owns two snapshottable dirs.
  UserGroupInformation ugi1=UserGroupInformation.createUserForTesting("user1",new String[]{"group1"});
  DistributedFileSystem fs1=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(ugi1,conf);
  Path dir1_user1=new Path("/dir1_user1");
  Path dir2_user1=new Path("/dir2_user1");
  fs1.mkdirs(dir1_user1);
  fs1.mkdirs(dir2_user1);
  hdfs.allowSnapshot(dir1_user1);
  hdfs.allowSnapshot(dir2_user1);
  // user2 creates and owns a dir plus a nested subdir, both snapshottable.
  UserGroupInformation ugi2=UserGroupInformation.createUserForTesting("user2",new String[]{"group2"});
  DistributedFileSystem fs2=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(ugi2,conf);
  Path dir_user2=new Path("/dir_user2");
  Path subdir_user2=new Path(dir_user2,"subdir");
  fs2.mkdirs(dir_user2);
  fs2.mkdirs(subdir_user2);
  hdfs.allowSnapshot(dir_user2);
  hdfs.allowSnapshot(subdir_user2);
  // A member of the configured supergroup sees all six snapshottable dirs.
  String supergroup=conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  UserGroupInformation superUgi=UserGroupInformation.createUserForTesting("superuser",new String[]{supergroup});
  DistributedFileSystem fs3=(DistributedFileSystem)DFSTestUtil.getFileSystemAs(superUgi,conf);
  SnapshottableDirectoryStatus[] dirs=fs3.getSnapshottableDirListing();
  assertEquals(6,dirs.length);
  // Each regular user only sees their own directories.
  dirs=fs1.getSnapshottableDirListing();
  assertEquals(2,dirs.length);
  assertEquals(dir1_user1,dirs[0].getFullPath());
  assertEquals(dir2_user1,dirs[1].getFullPath());
  dirs=fs2.getSnapshottableDirListing();
  assertEquals(2,dirs.length);
  assertEquals(dir_user2,dirs[0].getFullPath());
  assertEquals(subdir_user2,dirs[1].getFullPath());
}

Class: org.apache.hadoop.hdfs.server.namenode.snapshot.TestXAttrWithSnapshot

InternalCallVerifier EqualityVerifier 
/**
 * 1) Save xattrs, then create snapshot. Assert that inode of original and
 * snapshot have same xattrs. 2) Change the original xattrs, assert snapshot
 * still has old xattrs. The change assertions are re-run across restarts
 * with and without a saved namespace to verify edit-log/fsimage persistence.
 */
@Test public void testXAttrForSnapshotRootAfterChange() throws Exception {
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
  hdfs.setXAttr(path,name1,value1);
  hdfs.setXAttr(path,name2,value2);
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
  // Original dir and snapshot must expose identical xattrs.
  // (JUnit convention: expected value first, actual second.)
  Map xattrs=hdfs.getXAttrs(path);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(value2,xattrs.get(name2));
  xattrs=hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(value2,xattrs.get(name2));
  // Change name1 on the live dir; snapshot must keep the old values,
  // before and after restarts (without and with saveNamespace).
  hdfs.setXAttr(path,name1,newValue1);
  doSnapshotRootChangeAssertions(path,snapshotPath);
  restart(false);
  doSnapshotRootChangeAssertions(path,snapshotPath);
  restart(true);
  doSnapshotRootChangeAssertions(path,snapshotPath);
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests modifying xattrs on a directory that has been snapshotted:
 * modifications after the snapshot must be visible on the live directory
 * but never leak into the (empty-at-snapshot-time) snapshot copy.
 */
@Test(timeout=120000) public void testModifyReadsCurrentState() throws Exception {
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
  // Snapshot BEFORE any xattrs exist.
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
  hdfs.setXAttr(path,name1,value1);
  hdfs.setXAttr(path,name2,value2);
  // Live dir has both xattrs... (expected first per JUnit convention)
  Map xattrs=hdfs.getXAttrs(path);
  assertEquals(2,xattrs.size());
  assertArrayEquals(value1,xattrs.get(name1));
  assertArrayEquals(value2,xattrs.get(name2));
  // ...but the snapshot, taken before they were set, has none.
  xattrs=hdfs.getXAttrs(snapshotPath);
  assertEquals(0,xattrs.size());
  // REPLACE name1's value; only the live dir changes.
  hdfs.setXAttr(path,name1,value2,EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs=hdfs.getXAttrs(path);
  assertEquals(2,xattrs.size());
  assertArrayEquals(value2,xattrs.get(name1));
  assertArrayEquals(value2,xattrs.get(name2));
  // REPLACE name2's value as well.
  hdfs.setXAttr(path,name2,value1,EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs=hdfs.getXAttrs(path);
  assertEquals(2,xattrs.size());
  assertArrayEquals(value2,xattrs.get(name1));
  assertArrayEquals(value1,xattrs.get(name2));
  // Snapshot is still empty.
  xattrs=hdfs.getXAttrs(snapshotPath);
  assertEquals(0,xattrs.size());
  // Removing both leaves the live dir empty too.
  hdfs.removeXAttr(path,name1);
  hdfs.removeXAttr(path,name2);
  xattrs=hdfs.getXAttrs(path);
  assertEquals(0,xattrs.size());
}

InternalCallVerifier EqualityVerifier 
/**
 * 1) Save xattrs, then create snapshot. Assert that inode of original and
 * snapshot have same xattrs. 2) Remove some original xattrs, assert snapshot
 * still has old xattrs. The removal assertions are re-run across restarts
 * with and without a saved namespace to verify edit-log/fsimage persistence.
 */
@Test public void testXAttrForSnapshotRootAfterRemove() throws Exception {
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
  hdfs.setXAttr(path,name1,value1);
  hdfs.setXAttr(path,name2,value2);
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
  // Original dir and snapshot must expose identical xattrs.
  // (JUnit convention: expected value first, actual second.)
  Map xattrs=hdfs.getXAttrs(path);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(value2,xattrs.get(name2));
  xattrs=hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  Assert.assertArrayEquals(value2,xattrs.get(name2));
  // Remove both xattrs from the live dir; the snapshot must keep them,
  // before and after restarts (without and with saveNamespace).
  hdfs.removeXAttr(path,name1);
  hdfs.removeXAttr(path,name2);
  doSnapshotRootRemovalAssertions(path,snapshotPath);
  restart(false);
  doSnapshotRootRemovalAssertions(path,snapshotPath);
  restart(true);
  doSnapshotRootRemovalAssertions(path,snapshotPath);
}

InternalCallVerifier EqualityVerifier 
/**
 * Assert exception of setting xattr when exceeding the namespace quota:
 * with the quota nearly consumed by the dir, snapshot, and file, adding a
 * second xattr must throw NSQuotaExceededException.
 */
@Test public void testSetXAttrExceedsQuota() throws Exception {
  Path filePath=new Path(path,"file1");
  Path fileSnapshotPath=new Path(snapshotPath,"file1");
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0755));
  hdfs.allowSnapshot(path);
  // Namespace quota of 3 leaves no headroom once dir + file + snapshot exist.
  hdfs.setQuota(path,3,HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close();
  hdfs.setXAttr(filePath,name1,value1);
  hdfs.createSnapshot(path,snapshotName);
  // Both the live file and its snapshot copy expose the first xattr.
  // (expected first per JUnit convention)
  byte[] value=hdfs.getXAttr(filePath,name1);
  Assert.assertArrayEquals(value1,value);
  value=hdfs.getXAttr(fileSnapshotPath,name1);
  Assert.assertArrayEquals(value1,value);
  // Setting a second xattr must now exceed the quota.
  exception.expect(NSQuotaExceededException.class);
  hdfs.setXAttr(filePath,name2,value2);
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests removing xattrs on a directory that has been snapshotted:
 * removals after the snapshot affect only the live directory, and the
 * snapshot (taken before any xattrs existed) stays empty throughout.
 */
@Test(timeout=120000) public void testRemoveReadsCurrentState() throws Exception {
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
  // Snapshot BEFORE any xattrs exist.
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
  hdfs.setXAttr(path,name1,value1);
  hdfs.setXAttr(path,name2,value2);
  // Live dir has both xattrs... (expected first per JUnit convention)
  Map xattrs=hdfs.getXAttrs(path);
  assertEquals(2,xattrs.size());
  assertArrayEquals(value1,xattrs.get(name1));
  assertArrayEquals(value2,xattrs.get(name2));
  // ...but the pre-existing snapshot has none.
  xattrs=hdfs.getXAttrs(snapshotPath);
  assertEquals(0,xattrs.size());
  // Remove name2; only name1 remains on the live dir.
  hdfs.removeXAttr(path,name2);
  xattrs=hdfs.getXAttrs(path);
  assertEquals(1,xattrs.size());
  assertArrayEquals(value1,xattrs.get(name1));
  // Remove name1; the live dir is now empty as well.
  hdfs.removeXAttr(path,name1);
  xattrs=hdfs.getXAttrs(path);
  assertEquals(0,xattrs.size());
}

InternalCallVerifier EqualityVerifier 
/**
 * Test successive snapshots in between modifications of XAttrs.
 * Also verify that snapshot XAttrs are not altered when a
 * snapshot is deleted.
 */
@Test public void testSuccessiveSnapshotXAttrChanges() throws Exception {
  // s1: only name1=value1.
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
  hdfs.setXAttr(path,name1,value1);
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
  Map xattrs=hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  // s2: name1=newValue1 and name2=value2.
  hdfs.setXAttr(path,name1,newValue1);
  hdfs.setXAttr(path,name2,value2);
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName2);
  xattrs=hdfs.getXAttrs(snapshotPath2);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(newValue1,xattrs.get(name1));
  Assert.assertArrayEquals(value2,xattrs.get(name2));
  // s3: back to only name1=value1.
  hdfs.setXAttr(path,name1,value1);
  hdfs.removeXAttr(path,name2);
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName3);
  xattrs=hdfs.getXAttrs(snapshotPath3);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  // Earlier snapshots still report their own recorded xattrs.
  xattrs=hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  xattrs=hdfs.getXAttrs(snapshotPath2);
  Assert.assertEquals(2,xattrs.size());
  Assert.assertArrayEquals(newValue1,xattrs.get(name1));
  Assert.assertArrayEquals(value2,xattrs.get(name2));
  // Deleting the middle snapshot must not disturb s1 or s3.
  hdfs.deleteSnapshot(path,snapshotName2);
  xattrs=hdfs.getXAttrs(snapshotPath);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  xattrs=hdfs.getXAttrs(snapshotPath3);
  Assert.assertEquals(1,xattrs.size());
  Assert.assertArrayEquals(value1,xattrs.get(name1));
  // Clean up remaining snapshots.
  hdfs.deleteSnapshot(path,snapshotName);
  hdfs.deleteSnapshot(path,snapshotName3);
}

InternalCallVerifier EqualityVerifier 
/**
 * Assert exception of removing xattr when exceeding the namespace quota:
 * with the quota nearly consumed by the dir, snapshot, and file, removing
 * the xattr (which must record a diff) throws NSQuotaExceededException.
 */
@Test public void testRemoveXAttrExceedsQuota() throws Exception {
  Path filePath=new Path(path,"file1");
  Path fileSnapshotPath=new Path(snapshotPath,"file1");
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0755));
  hdfs.allowSnapshot(path);
  // Namespace quota of 3 leaves no headroom once dir + file + snapshot exist.
  hdfs.setQuota(path,3,HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs,filePath,FsPermission.createImmutable((short)0600)).close();
  hdfs.setXAttr(filePath,name1,value1);
  hdfs.createSnapshot(path,snapshotName);
  // Both the live file and its snapshot copy expose the xattr.
  // (expected first per JUnit convention)
  byte[] value=hdfs.getXAttr(filePath,name1);
  Assert.assertArrayEquals(value1,value);
  value=hdfs.getXAttr(fileSnapshotPath,name1);
  Assert.assertArrayEquals(value1,value);
  // Removing the xattr must now exceed the quota.
  exception.expect(NSQuotaExceededException.class);
  hdfs.removeXAttr(filePath,name1);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that users can copy a snapshot while preserving its xattrs:
 * "hdfs dfs -cp -px" from a snapshot path must succeed and carry the
 * xattrs onto the copy.
 */
@Test(timeout=120000) public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
  FileSystem.mkdirs(hdfs,path,FsPermission.createImmutable((short)0700));
  hdfs.setXAttr(path,name1,value1);
  hdfs.setXAttr(path,name2,value2);
  SnapshotTestHelper.createSnapshot(hdfs,path,snapshotName);
  Path snapshotCopy=new Path(path.toString() + "-copy");
  // -p preserves attributes, x adds xattrs; source is the snapshot path.
  String[] argv=new String[]{"-cp","-px",snapshotPath.toUri().toString(),snapshotCopy.toUri().toString()};
  int ret=ToolRunner.run(new FsShell(conf),argv);
  assertEquals("cp -px is not working on a snapshot",SUCCESS,ret);
  // The copy must expose the same xattrs the snapshot had.
  Map xattrs=hdfs.getXAttrs(snapshotCopy);
  assertArrayEquals(value1,xattrs.get(name1));
  assertArrayEquals(value2,xattrs.get(name2));
}

Class: org.apache.hadoop.hdfs.server.namenode.startupprogress.TestStartupProgress

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Hammers StartupProgress from 100 concurrent threads, each repeatedly
 * writing one of four (phase, step, file, size, total) combinations, then
 * checks that the final view is internally consistent. Each of the 4
 * combinations is run by 25 threads incrementing a counter by 100, so every
 * step's expected count is 2500.
 */
@Test(timeout=10000) public void testThreadSafety() throws Exception {
  int numThreads=100;
  // Parallel arrays: thread i uses entry i % 4 of each.
  Phase[] phases={LOADING_FSIMAGE,LOADING_FSIMAGE,LOADING_EDITS,LOADING_EDITS};
  Step[] steps=new Step[]{new Step(INODES),new Step(DELEGATION_KEYS),new Step(INODES),new Step(DELEGATION_KEYS)};
  String[] files={"file1","file1","file2","file2"};
  long[] sizes={1000L,1000L,2000L,2000L};
  long[] totals={10000L,20000L,30000L,40000L};
  ExecutorService exec=Executors.newFixedThreadPool(numThreads);
  try {
    for (int i=0; i < numThreads; ++i) {
      final Phase phase=phases[i % phases.length];
      final Step step=steps[i % steps.length];
      final String file=files[i % files.length];
      final long size=sizes[i % sizes.length];
      final long total=totals[i % totals.length];
      exec.submit(new Callable(){
        @Override public Void call(){
          // Full begin/set/increment/end cycle exercises every mutator.
          startupProgress.beginPhase(phase);
          startupProgress.setFile(phase,file);
          startupProgress.setSize(phase,size);
          startupProgress.setTotal(phase,step,total);
          incrementCounter(startupProgress,phase,step,100L);
          startupProgress.endStep(phase,step);
          startupProgress.endPhase(phase);
          return null;
        }
      }
);
    }
  }
 finally {
    // Wait for all workers before reading the view.
    exec.shutdown();
    assertTrue(exec.awaitTermination(10000L,TimeUnit.MILLISECONDS));
  }
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  assertEquals("file1",view.getFile(LOADING_FSIMAGE));
  assertEquals(1000L,view.getSize(LOADING_FSIMAGE));
  assertEquals(10000L,view.getTotal(LOADING_FSIMAGE,new Step(INODES)));
  assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(INODES)));
  assertEquals(20000L,view.getTotal(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
  assertEquals(2500L,view.getCount(LOADING_FSIMAGE,new Step(DELEGATION_KEYS)));
  assertEquals("file2",view.getFile(LOADING_EDITS));
  assertEquals(2000L,view.getSize(LOADING_EDITS));
  assertEquals(30000L,view.getTotal(LOADING_EDITS,new Step(INODES)));
  assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(INODES)));
  assertEquals(40000L,view.getTotal(LOADING_EDITS,new Step(DELEGATION_KEYS)));
  assertEquals(2500L,view.getCount(LOADING_EDITS,new Step(DELEGATION_KEYS)));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies step counters: increments are visible in a view, a never-started
 * phase reports zero, and a view is an immutable snapshot — increments made
 * after createView only appear in a newly created view.
 */
@Test(timeout=10000) public void testCounter(){
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes=new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes);
  incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageInodes,100L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes);
  Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageDelegationKeys,200L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // LOADING_EDITS is left running with 5000 counted so far.
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile=new Step("file",1000L);
  startupProgress.beginStep(LOADING_EDITS,loadingEditsFile);
  incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,5000L);
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  assertEquals(100L,view.getCount(LOADING_FSIMAGE,loadingFsImageInodes));
  assertEquals(200L,view.getCount(LOADING_FSIMAGE,loadingFsImageDelegationKeys));
  assertEquals(5000L,view.getCount(LOADING_EDITS,loadingEditsFile));
  // A phase that never began reports a zero count.
  assertEquals(0L,view.getCount(SAVING_CHECKPOINT,new Step(INODES)));
  // Further increments must NOT be reflected in the already-created view...
  incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,1000L);
  startupProgress.endStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.endPhase(LOADING_EDITS);
  assertEquals(5000L,view.getCount(LOADING_EDITS,loadingEditsFile));
  // ...but a fresh view sees the updated total.
  view=startupProgress.createView();
  assertNotNull(view);
  assertEquals(6000L,view.getCount(LOADING_EDITS,loadingEditsFile));
}

InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Verifies that getSteps returns steps in their natural sorted order
 * regardless of the (shuffled) order in which they were begun.
 */
@Test(timeout=10000) public void testStepSequence(){
  // Expected order: sorted by step type then file name.
  Step[] expectedSteps=new Step[]{new Step(INODES,"file1"),new Step(DELEGATION_KEYS,"file1"),new Step(INODES,"file2"),new Step(DELEGATION_KEYS,"file2"),new Step(INODES,"file3"),new Step(DELEGATION_KEYS,"file3")};
  // Register the same steps in random order.
  List shuffledSteps=new ArrayList(Arrays.asList(expectedSteps));
  Collections.shuffle(shuffledSteps);
  startupProgress.beginPhase(SAVING_CHECKPOINT);
  for ( Step step : shuffledSteps) {
    startupProgress.beginStep(SAVING_CHECKPOINT,step);
  }
  // The view must iterate them back in the expected order.
  List actualSteps=new ArrayList(expectedSteps.length);
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  for ( Step step : view.getSteps(SAVING_CHECKPOINT)) {
    actualSteps.add(step);
  }
  assertArrayEquals(expectedSteps,actualSteps.toArray());
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies elapsed-time tracking: completed phases/steps report a frozen
 * positive elapsed time, running phases/steps keep accruing time in new
 * views, and never-started phases report zero.
 *
 * Fix: the saved delegation-keys elapsed time was mistakenly read from the
 * INODES step (copy-paste) and then never asserted; it now reads the
 * DELEGATION_KEYS step and is asserted frozen like the other completed step.
 */
@Test(timeout=10000) public void testElapsedTime() throws Exception {
  // Run LOADING_FSIMAGE to completion with two timed steps.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes=new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes);
  Thread.sleep(50L); // ensure measurable elapsed time
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes);
  Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  Thread.sleep(50L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // Leave LOADING_EDITS running so its elapsed time keeps growing.
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile=new Step("file",1000L);
  startupProgress.beginStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.setTotal(LOADING_EDITS,loadingEditsFile,10000L);
  incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,5000L);
  Thread.sleep(50L);
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  // Everything begun so far reports positive elapsed time.
  assertTrue(view.getElapsedTime() > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageInodes) > 0);
  assertTrue(view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageDelegationKeys) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS) > 0);
  assertTrue(view.getElapsedTime(LOADING_EDITS,loadingEditsFile) > 0);
  // A never-started phase/step reports zero.
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT) == 0);
  assertTrue(view.getElapsedTime(SAVING_CHECKPOINT,new Step(INODES)) == 0);
  // Snapshot the current readings, wait, and re-read from a new view.
  long totalTime=view.getElapsedTime();
  long loadingFsImageTime=view.getElapsedTime(LOADING_FSIMAGE);
  long loadingFsImageInodesTime=view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageInodes);
  // BUGFIX: read from the delegation-keys step, not the inodes step.
  long loadingFsImageDelegationKeysTime=view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  long loadingEditsTime=view.getElapsedTime(LOADING_EDITS);
  long loadingEditsFileTime=view.getElapsedTime(LOADING_EDITS,loadingEditsFile);
  Thread.sleep(50L);
  // Overall time keeps growing; completed phase/steps are frozen;
  // the still-running LOADING_EDITS phase and step keep growing.
  assertTrue(totalTime < view.getElapsedTime());
  assertEquals(loadingFsImageTime,view.getElapsedTime(LOADING_FSIMAGE));
  assertEquals(loadingFsImageInodesTime,view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageInodes));
  assertEquals(loadingFsImageDelegationKeysTime,view.getElapsedTime(LOADING_FSIMAGE,loadingFsImageDelegationKeys));
  assertTrue(loadingEditsTime < view.getElapsedTime(LOADING_EDITS));
  assertTrue(loadingEditsFileTime < view.getElapsedTime(LOADING_EDITS,loadingEditsFile));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies percent-complete math at step, phase, and overall granularity,
 * both mid-run (counts vs totals) and after phases complete (completed
 * phases clamp to 1.0, never-started phases stay 0.0).
 */
@Test(timeout=10000) public void testPercentComplete(){
  // LOADING_FSIMAGE: inodes 100/1000 = 0.10, delegation keys 200/800 = 0.25.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes=new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes);
  startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageInodes,1000L);
  incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageInodes,100L);
  Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageDelegationKeys,800L);
  incrementCounter(startupProgress,LOADING_FSIMAGE,loadingFsImageDelegationKeys,200L);
  // LOADING_EDITS: file step 5000/10000 = 0.5.
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile=new Step("file",1000L);
  startupProgress.beginStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.setTotal(LOADING_EDITS,loadingEditsFile,10000L);
  incrementCounter(startupProgress,LOADING_EDITS,loadingEditsFile,5000L);
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  // Mid-run percentages (phase percent aggregates its steps' counts/totals).
  assertEquals(0.167f,view.getPercentComplete(),0.001f);
  assertEquals(0.167f,view.getPercentComplete(LOADING_FSIMAGE),0.001f);
  assertEquals(0.10f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageInodes),0.001f);
  assertEquals(0.25f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageDelegationKeys),0.001f);
  assertEquals(0.5f,view.getPercentComplete(LOADING_EDITS),0.001f);
  assertEquals(0.5f,view.getPercentComplete(LOADING_EDITS,loadingEditsFile),0.001f);
  // A never-started phase is 0.0 complete.
  assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT),0.001f);
  assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT,new Step(INODES)),0.001f);
  // Finish both phases; completed work reports 1.0 in a fresh view.
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.endStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.endPhase(LOADING_EDITS);
  view=startupProgress.createView();
  assertNotNull(view);
  assertEquals(0.5f,view.getPercentComplete(),0.001f);
  assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE),0.001f);
  assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageInodes),0.001f);
  assertEquals(1.0f,view.getPercentComplete(LOADING_FSIMAGE,loadingFsImageDelegationKeys),0.001f);
  assertEquals(1.0f,view.getPercentComplete(LOADING_EDITS),0.001f);
  assertEquals(1.0f,view.getPercentComplete(LOADING_EDITS,loadingEditsFile),0.001f);
  assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT),0.001f);
  assertEquals(0.0f,view.getPercentComplete(SAVING_CHECKPOINT,new Step(INODES)),0.001f);
}

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the pristine state of a StartupProgress view before any phase
 * begins: zero times/percentages, null files, PENDING status, no steps,
 * and phases iterated in declaration (EnumSet) order.
 */
@Test(timeout=10000) public void testInitialState(){
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  assertEquals(0L,view.getElapsedTime());
  assertEquals(0.0f,view.getPercentComplete(),0.001f);
  List phases=new ArrayList();
  for ( Phase phase : view.getPhases()) {
    phases.add(phase);
    // Every phase starts empty and pending.
    assertEquals(0L,view.getElapsedTime(phase));
    assertNull(view.getFile(phase));
    assertEquals(0.0f,view.getPercentComplete(phase),0.001f);
    // Size is unset (Long.MIN_VALUE sentinel) until setSize is called.
    assertEquals(Long.MIN_VALUE,view.getSize(phase));
    assertEquals(PENDING,view.getStatus(phase));
    assertEquals(0L,view.getTotal(phase));
    // No steps may exist yet.
    for ( Step step : view.getSteps(phase)) {
      fail(String.format("unexpected step %s in phase %s at initial state",step,phase));
    }
  }
  // The iteration order matches the Phase enum's declaration order.
  assertArrayEquals(EnumSet.allOf(Phase.class).toArray(),phases.toArray());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that StartupProgress freezes once every phase has completed:
 * further begin/set/increment/end calls are ignored, so a view taken
 * before the extra mutations equals a view taken after them.
 */
@Test(timeout=10000) public void testFrozenAfterStartupCompletes(){
  // Run LOADING_FSIMAGE with one step to completion.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.setFile(LOADING_FSIMAGE,"file1");
  startupProgress.setSize(LOADING_FSIMAGE,1000L);
  Step step=new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE,step);
  startupProgress.setTotal(LOADING_FSIMAGE,step,10000L);
  incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L);
  startupProgress.endStep(LOADING_FSIMAGE,step);
  startupProgress.endPhase(LOADING_FSIMAGE);
  // Force every remaining phase to COMPLETE so startup is considered done.
  for ( Phase phase : EnumSet.allOf(Phase.class)) {
    if (startupProgress.getStatus(phase) != Status.COMPLETE) {
      startupProgress.beginPhase(phase);
      startupProgress.endPhase(phase);
    }
  }
  StartupProgressView before=startupProgress.createView();
  // All of the following mutations happen after completion and must be no-ops.
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.setFile(LOADING_FSIMAGE,"file2");
  startupProgress.setSize(LOADING_FSIMAGE,2000L);
  startupProgress.beginStep(LOADING_FSIMAGE,step);
  startupProgress.setTotal(LOADING_FSIMAGE,step,20000L);
  incrementCounter(startupProgress,LOADING_FSIMAGE,step,100L);
  startupProgress.endStep(LOADING_FSIMAGE,step);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  Step newStep=new Step("file1");
  startupProgress.beginStep(LOADING_EDITS,newStep);
  incrementCounter(startupProgress,LOADING_EDITS,newStep,100L);
  startupProgress.endStep(LOADING_EDITS,newStep);
  startupProgress.endPhase(LOADING_EDITS);
  // Before/after views must agree on every tracked value.
  StartupProgressView after=startupProgress.createView();
  assertEquals(before.getCount(LOADING_FSIMAGE),after.getCount(LOADING_FSIMAGE));
  assertEquals(before.getCount(LOADING_FSIMAGE,step),after.getCount(LOADING_FSIMAGE,step));
  assertEquals(before.getElapsedTime(),after.getElapsedTime());
  assertEquals(before.getElapsedTime(LOADING_FSIMAGE),after.getElapsedTime(LOADING_FSIMAGE));
  assertEquals(before.getElapsedTime(LOADING_FSIMAGE,step),after.getElapsedTime(LOADING_FSIMAGE,step));
  assertEquals(before.getFile(LOADING_FSIMAGE),after.getFile(LOADING_FSIMAGE));
  assertEquals(before.getSize(LOADING_FSIMAGE),after.getSize(LOADING_FSIMAGE));
  assertEquals(before.getTotal(LOADING_FSIMAGE),after.getTotal(LOADING_FSIMAGE));
  assertEquals(before.getTotal(LOADING_FSIMAGE,step),after.getTotal(LOADING_FSIMAGE,step));
  // The post-completion LOADING_EDITS step must not have been recorded.
  assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext());
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that setTotal values for each step are reported back exactly
 * through a view after the phases complete.
 */
@Test(timeout=10000) public void testTotal(){
  startupProgress.beginPhase(LOADING_FSIMAGE);
  Step loadingFsImageInodes=new Step(INODES);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageInodes);
  startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageInodes,1000L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageInodes);
  Step loadingFsImageDelegationKeys=new Step(DELEGATION_KEYS);
  startupProgress.beginStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.setTotal(LOADING_FSIMAGE,loadingFsImageDelegationKeys,800L);
  startupProgress.endStep(LOADING_FSIMAGE,loadingFsImageDelegationKeys);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  Step loadingEditsFile=new Step("file",1000L);
  startupProgress.beginStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.setTotal(LOADING_EDITS,loadingEditsFile,10000L);
  startupProgress.endStep(LOADING_EDITS,loadingEditsFile);
  startupProgress.endPhase(LOADING_EDITS);
  // Each step's total must round-trip unchanged.
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  assertEquals(1000L,view.getTotal(LOADING_FSIMAGE,loadingFsImageInodes));
  assertEquals(800L,view.getTotal(LOADING_FSIMAGE,loadingFsImageDelegationKeys));
  assertEquals(10000L,view.getTotal(LOADING_EDITS,loadingEditsFile));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies phase status reporting: a begun-and-ended phase is COMPLETE,
 * a begun-but-not-ended phase is RUNNING, and an untouched phase is PENDING.
 */
@Test(timeout=10000) public void testStatus(){
  startupProgress.beginPhase(LOADING_FSIMAGE);
  startupProgress.endPhase(LOADING_FSIMAGE);
  startupProgress.beginPhase(LOADING_EDITS);
  StartupProgressView view=startupProgress.createView();
  assertNotNull(view);
  assertEquals(COMPLETE,view.getStatus(LOADING_FSIMAGE));
  assertEquals(RUNNING,view.getStatus(LOADING_EDITS));
  assertEquals(PENDING,view.getStatus(SAVING_CHECKPOINT));
}

Class: org.apache.hadoop.hdfs.server.namenode.web.resources.TestWebHdfsDataLocality

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Verifies WebHDFS data locality: CREATE chooses the datanode co-located
 * with the client, and GETFILECHECKSUM/OPEN/APPEND choose the datanode
 * that holds the (single) block replica.
 */
@Test public void testDataLocality() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
  final int nDataNodes = racks.length;
  LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks));
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(nDataNodes).racks(racks).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final NameNode namenode = cluster.getNameNode();
    final DatanodeManager dm =
        namenode.getNamesystem().getBlockManager().getDatanodeManager();
    LOG.info("dm=" + dm);
    final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
    final String f = "/foo";
    { // CREATE should pick the datanode at the requesting address.
      for (int i = 0; i < nDataNodes; i++) {
        final DataNode dn = cluster.getDataNodes().get(i);
        final String ipAddr = dm.getDatanode(dn.getDatanodeId()).getIpAddr();
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
            namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
        Assert.assertEquals(ipAddr, chosen.getIpAddr());
      }
    }
    // Write a one-byte file with replication 1 so exactly one datanode
    // holds the replica.
    final Path p = new Path(f);
    final FSDataOutputStream out = dfs.create(p, (short) 1);
    out.write(1);
    out.close();
    final LocatedBlocks locatedblocks =
        NameNodeAdapter.getBlockLocations(namenode, f, 0, 1);
    // FIX: parameterized list type instead of a raw List so lb.get(0) is a
    // LocatedBlock (the raw form does not compile without a cast).
    final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
    Assert.assertEquals(1, lb.size());
    final DatanodeInfo[] locations = lb.get(0).getLocations();
    Assert.assertEquals(1, locations.length);
    final DatanodeInfo expected = locations[0];
    { // GETFILECHECKSUM must be served by the replica's datanode.
      final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
          namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
      Assert.assertEquals(expected, chosen);
    }
    { // OPEN must be served by the replica's datanode.
      final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
          namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
      Assert.assertEquals(expected, chosen);
    }
    { // APPEND must be served by the replica's datanode.
      final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
          namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
      Assert.assertEquals(expected, chosen);
    }
  } finally {
    cluster.shutdown();
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Verifies that chooseDatanode skips every datanode listed in the
 * excludeDatanodes parameter for GETFILECHECKSUM, OPEN and APPEND.
 */
@Test public void testExcludeDataNodes() throws Exception {
  final Configuration conf = WebHdfsTestUtil.createConf();
  final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
  final String[] hosts = {"DataNode1", "DataNode2", "DataNode3",
      "DataNode4", "DataNode5", "DataNode6"};
  final int nDataNodes = hosts.length;
  LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
      + ", hosts=" + Arrays.asList(hosts));
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final NameNode namenode = cluster.getNameNode();
    final DatanodeManager dm =
        namenode.getNamesystem().getBlockManager().getDatanodeManager();
    LOG.info("dm=" + dm);
    final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
    final String f = "/foo";
    // Create a one-byte file with replication 3 so three datanodes hold it.
    final Path p = new Path(f);
    final FSDataOutputStream out = dfs.create(p, (short) 3);
    out.write(1);
    out.close();
    final LocatedBlocks locatedblocks =
        NameNodeAdapter.getBlockLocations(namenode, f, 0, 1);
    // FIX: parameterized list type instead of a raw List so lb.get(0) is a
    // LocatedBlock (the raw form does not compile without a cast).
    final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
    Assert.assertEquals(1, lb.size());
    final DatanodeInfo[] locations = lb.get(0).getLocations();
    Assert.assertEquals(3, locations.length);
    // FIX: StringBuilder instead of StringBuffer — this is single-threaded
    // accumulation of the comma-separated exclude list.
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 2; i++) {
      // Grow the exclude list by one replica each iteration; the chosen
      // datanode must never be one of the excluded hosts.
      sb.append(locations[i].getXferAddr());
      { // test GETFILECHECKSUM
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, sb.toString());
        for (int j = 0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
        }
      }
      { // test OPEN
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
            namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
        for (int j = 0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
        }
      }
      { // test APPEND
        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, sb.toString());
        for (int j = 0; j <= i; j++) {
          Assert.assertNotEquals(locations[j].getHostName(), chosen.getHostName());
        }
      }
      sb.append(",");
    }
  } finally {
    cluster.shutdown();
  }
}

Class: org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitCache

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test unlinking a file whose blocks we are caching in the DFSClient.
 * The DataNode will notify the DFSClient that the replica is stale via the
 * ShortCircuitShm.
 */
@Test(timeout=60000) public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testUnlinkingReplicasInFileDescriptorCache", sockDir);
  // Effectively disable cache expiry so entries only leave when invalidated.
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY, 1000000000L);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
  // FIX: restore the parameterized types stripped from this block — the
  // raw HashMap/Supplier/Iterator forms did not match the interfaces.
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      // Nothing has been read yet, so the shm manager starts out empty.
      Assert.assertEquals(0, info.size());
    }
  });
  final Path TEST_PATH = new Path("/test_file");
  final int TEST_FILE_LEN = 8193;
  final int SEED = 0xFADE0;
  DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LEN, (short) 1, SEED);
  byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH);
  byte expected[] = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  // Reading the file created one connected, not-full shm segment for the
  // single datanode.
  final DatanodeInfo datanode =
      new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      Assert.assertTrue(info.get(datanode).full.isEmpty());
      Assert.assertFalse(info.get(datanode).disabled);
      Assert.assertEquals(1, info.get(datanode).notFull.values().size());
      DfsClientShm shm = info.get(datanode).notFull.values().iterator().next();
      Assert.assertFalse(shm.isDisconnected());
    }
  });
  // Delete the file; the datanode should invalidate every slot. Poll until
  // no valid slot remains.
  fs.delete(TEST_PATH, false);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    MutableBoolean done = new MutableBoolean(true);

    @Override
    public Boolean get() {
      try {
        done.setValue(true);
        cache.getDfsClientShmManager().visit(new Visitor() {
          @Override
          public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
              throws IOException {
            Assert.assertTrue(info.get(datanode).full.isEmpty());
            Assert.assertFalse(info.get(datanode).disabled);
            Assert.assertEquals(1, info.get(datanode).notFull.values().size());
            DfsClientShm shm = info.get(datanode).notFull.values().iterator().next();
            for (Iterator<Slot> iter = shm.slotIterator(); iter.hasNext(); ) {
              Slot slot = iter.next();
              if (slot.isValid()) {
                done.setValue(false);
              }
            }
          }
        });
      } catch (IOException e) {
        LOG.error("error running visitor", e);
      }
      return done.booleanValue();
    }
  }, 10, 60000);
  cluster.shutdown();
  sockDir.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Allocates a shared-memory slot from the datanode, verifies the shm
 * manager's bookkeeping, then releases the slot and waits for the segment
 * lists to drain.
 */
@Test(timeout=60000) public void testAllocShm() throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf("testAllocShm", sockDir);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
  // FIX: restore the parameterized HashMap/Supplier types that were
  // stripped; the raw forms did not match the Visitor/Supplier interfaces.
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      // The shm manager starts out empty.
      Assert.assertEquals(0, info.size());
    }
  });
  DomainPeer peer = getDomainPeerToDn(conf);
  MutableBoolean usedPeer = new MutableBoolean(false);
  ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
  final DatanodeInfo datanode =
      new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
  // Allocating the first slot requires a round trip over the domain peer.
  Slot slot = cache.allocShmSlot(datanode, peer, usedPeer, blockId, "testAllocShm_client");
  Assert.assertNotNull(slot);
  Assert.assertTrue(usedPeer.booleanValue());
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      // One not-full segment for the datanode we allocated from.
      Assert.assertEquals(1, info.size());
      PerDatanodeVisitorInfo vinfo = info.get(datanode);
      Assert.assertFalse(vinfo.disabled);
      Assert.assertEquals(0, vinfo.full.size());
      Assert.assertEquals(1, vinfo.notFull.size());
    }
  });
  cache.scheduleSlotReleaser(slot);
  // Wait for the asynchronous slot release to empty both segment lists.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      final MutableBoolean done = new MutableBoolean(false);
      try {
        cache.getDfsClientShmManager().visit(new Visitor() {
          @Override
          public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
              throws IOException {
            done.setValue(info.get(datanode).full.isEmpty()
                && info.get(datanode).notFull.isEmpty());
          }
        });
      } catch (IOException e) {
        LOG.error("error running visitor", e);
      }
      return done.booleanValue();
    }
  }, 10, 60000);
  cluster.shutdown();
  sockDir.close();
}

Class: org.apache.hadoop.hdfs.shortcircuit.TestShortCircuitShm

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Allocates slots from a shared-memory segment until it is full, checks
 * iterator and anchor bookkeeping, then unregisters and frees everything.
 */
@Test(timeout=60000) public void testAllocateSlots() throws Exception {
  File path = new File(TEST_BASE, "testAllocateSlots");
  path.mkdirs();
  SharedFileDescriptorFactory factory =
      SharedFileDescriptorFactory.create("shm_", new String[] {path.getAbsolutePath()});
  FileInputStream stream = factory.createDescriptor("testAllocateSlots", 4096);
  ShortCircuitShm shm = new ShortCircuitShm(ShmId.createRandom(), stream);
  int numSlots = 0;
  // FIX: parameterized ArrayList<Slot> instead of a raw ArrayList so the
  // enhanced-for loops below iterate Slots without unchecked casts.
  ArrayList<Slot> slots = new ArrayList<Slot>();
  while (!shm.isFull()) {
    Slot slot = shm.allocAndRegisterSlot(new ExtendedBlockId(123L, "test_bp1"));
    slots.add(slot);
    numSlots++;
  }
  LOG.info("allocated " + numSlots + " slots before running out.");
  int slotIdx = 0;
  // Every slot handed out must be reachable via the segment's iterator.
  for (Iterator<Slot> iter = shm.slotIterator(); iter.hasNext(); ) {
    Assert.assertTrue(slots.contains(iter.next()));
  }
  for (Slot slot : slots) {
    // Slots are not anchorable until makeAnchorable() is called, and their
    // indices are assigned sequentially from zero.
    Assert.assertFalse(slot.addAnchor());
    Assert.assertEquals(slotIdx++, slot.getSlotIdx());
  }
  for (Slot slot : slots) {
    slot.makeAnchorable();
  }
  for (Slot slot : slots) {
    Assert.assertTrue(slot.addAnchor());
  }
  for (Slot slot : slots) {
    slot.removeAnchor();
  }
  for (Slot slot : slots) {
    shm.unregisterSlot(slot.getSlotIdx());
    slot.makeInvalid();
  }
  shm.free();
  stream.close();
  FileUtil.fullyDelete(path);
}

Class: org.apache.hadoop.hdfs.tools.TestDFSAdminWithHA

InternalCallVerifier EqualityVerifier 
/** -saveNamespace (after entering safe mode) should succeed on both NNs. */
@Test(timeout=30000) public void testSaveNamespace() throws Exception {
  setUpHaCluster(false);
  // Namespace saves are only permitted while in safe mode.
  int ret = admin.run(new String[] {"-safemode", "enter"});
  assertEquals(err.toString().trim(), 0, ret);
  String expected = "Safe mode is ON in.*";
  assertOutputMatches(expected + newLine + expected + newLine);
  ret = admin.run(new String[] {"-saveNamespace"});
  assertEquals(err.toString().trim(), 0, ret);
  expected = "Save namespace successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -refreshNodes should report success for both namenodes. */
@Test(timeout=30000) public void testRefreshNodes() throws Exception {
  setUpHaCluster(false);
  final int ret = admin.run(new String[] {"-refreshNodes"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Refresh nodes successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -metasave should report a metasave file created on both namenodes. */
@Test(timeout=30000) public void testMetaSave() throws Exception {
  setUpHaCluster(false);
  final int ret = admin.run(new String[] {"-metasave", "dfs.meta"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Created metasave file dfs.meta in the log directory"
      + " of namenode.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -refreshServiceAcl (cluster with ACLs enabled) should succeed on both NNs. */
@Test(timeout=30000) public void testRefreshServiceAcl() throws Exception {
  setUpHaCluster(true);
  final int ret = admin.run(new String[] {"-refreshServiceAcl"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Refresh service acl successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -refreshCallQueue should report success for both namenodes. */
@Test(timeout=30000) public void testRefreshCallQueue() throws Exception {
  setUpHaCluster(false);
  final int ret = admin.run(new String[] {"-refreshCallQueue"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Refresh call queue successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -refreshUserToGroupsMappings should report success for both namenodes. */
@Test(timeout=30000) public void testRefreshUserToGroupsMappings() throws Exception {
  setUpHaCluster(false);
  final int ret = admin.run(new String[] {"-refreshUserToGroupsMappings"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Refresh user to groups mapping successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -restoreFailedStorage check/true/false should each report on both NNs. */
@Test(timeout=30000) public void testRestoreFailedStorage() throws Exception {
  setUpHaCluster(false);
  // Default value is false.
  int ret = admin.run(new String[] {"-restoreFailedStorage", "check"});
  assertEquals(err.toString().trim(), 0, ret);
  String expected = "restoreFailedStorage is set to false for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
  // Turn it on...
  ret = admin.run(new String[] {"-restoreFailedStorage", "true"});
  assertEquals(err.toString().trim(), 0, ret);
  expected = "restoreFailedStorage is set to true for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
  // ...and back off.
  ret = admin.run(new String[] {"-restoreFailedStorage", "false"});
  assertEquals(err.toString().trim(), 0, ret);
  expected = "restoreFailedStorage is set to false for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -safemode enter/get/leave/get should report the mode on both NNs. */
@Test(timeout=30000) public void testSetSafeMode() throws Exception {
  setUpHaCluster(false);
  int ret = admin.run(new String[] {"-safemode", "enter"});
  assertEquals(err.toString().trim(), 0, ret);
  String expected = "Safe mode is ON in.*";
  assertOutputMatches(expected + newLine + expected + newLine);
  ret = admin.run(new String[] {"-safemode", "get"});
  assertEquals(err.toString().trim(), 0, ret);
  expected = "Safe mode is ON in.*";
  assertOutputMatches(expected + newLine + expected + newLine);
  ret = admin.run(new String[] {"-safemode", "leave"});
  assertEquals(err.toString().trim(), 0, ret);
  expected = "Safe mode is OFF in.*";
  assertOutputMatches(expected + newLine + expected + newLine);
  ret = admin.run(new String[] {"-safemode", "get"});
  assertEquals(err.toString().trim(), 0, ret);
  expected = "Safe mode is OFF in.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -refreshSuperUserGroupsConfiguration should succeed on both namenodes. */
@Test(timeout=30000) public void testRefreshSuperUserGroupsConfiguration() throws Exception {
  setUpHaCluster(false);
  final int ret = admin.run(new String[] {"-refreshSuperUserGroupsConfiguration"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Refresh super user groups configuration successful for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

InternalCallVerifier EqualityVerifier 
/** -setBalancerBandwidth should report the new bandwidth for both NNs. */
@Test(timeout=30000) public void testSetBalancerBandwidth() throws Exception {
  setUpHaCluster(false);
  final int ret = admin.run(new String[] {"-setBalancerBandwidth", "10"});
  assertEquals(err.toString().trim(), 0, ret);
  final String expected = "Balancer bandwidth is set to 10 for.*";
  assertOutputMatches(expected + newLine + expected + newLine);
}

Class: org.apache.hadoop.hdfs.tools.TestDFSHAAdmin

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed, unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  // Without -forcemanual both transitions are refused and the protocol is
  // never invoked.
  assertEquals(-1, runTool("-transitionToActive", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  assertEquals(-1, runTool("-transitionToStandby", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  Mockito.verify(mockProtocol, Mockito.never()).transitionToActive(anyReqInfo());
  Mockito.verify(mockProtocol, Mockito.never()).transitionToStandby(anyReqInfo());
  // With -forcemanual (plus a confirmation on stdin) both go through.
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(reqInfoCaptor.capture());
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(reqInfoCaptor.capture());
  // Forced-manual requests must be tagged REQUEST_BY_USER_FORCED.
  for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
    assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
  }
}

EqualityVerifier 
/** A forced-fence failover with a malformed fencer setting must fail. */
@Test public void testFailoverWithFenceAndBadFencer() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
  tool.setConf(haConf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}

EqualityVerifier 
/** -checkHealth returns 0 when healthy and -1 when monitorHealth throws. */
@Test public void testCheckHealth() throws Exception {
  assertEquals(0, runTool("-checkHealth", "nn1"));
  Mockito.verify(mockProtocol).monitorHealth();
  // Make the next health check fail and verify the error surfaces.
  Mockito.doThrow(new HealthCheckFailedException("fake health check failure"))
      .when(mockProtocol).monitorHealth();
  assertEquals(-1, runTool("-checkHealth", "nn1"));
  assertOutputContains("Health check failed: fake health check failure");
}

EqualityVerifier 
/**
 * Test that the fencing configuration can be overridden per-nameservice
 * or per-namenode.
 */
@Test public void testFencingConfigPerNameNode() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
  final String nnSpecificKey = nsSpecificKey + ".nn1";
  HdfsConfiguration haConf = getHAConf();
  // The global fencer succeeds.
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
  // A failing per-namenode fencer overrides the global one.
  haConf.set(nnSpecificKey, getFencerFalseCommand());
  tool.setConf(haConf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  haConf.unset(nnSpecificKey);
  // A failing per-nameservice fencer also overrides the global one.
  haConf.set(nsSpecificKey, getFencerFalseCommand());
  tool.setConf(haConf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  // A succeeding per-nameservice fencer restores success.
  haConf.set(nsSpecificKey, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}

EqualityVerifier 
/**
 * Test that, even if automatic HA is enabled, the monitoring operations
 * still function correctly.
 */
@Test public void testMonitoringOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  tool.setConf(haConf);
  assertEquals(0, runTool("-checkHealth", "nn1"));
  Mockito.verify(mockProtocol).monitorHealth();
  assertEquals(0, runTool("-getServiceState", "nn1"));
  Mockito.verify(mockProtocol).getServiceStatus();
}

EqualityVerifier 
/** A plain failover succeeds once a (succeeding) fencer is configured. */
@Test public void testFailoverWithFencerConfigured() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "nn1", "nn2"));
}

EqualityVerifier 
/** A --forcefence failover succeeds with a succeeding fencer configured. */
@Test public void testFailoverWithFencerConfiguredAndForce() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}

EqualityVerifier 
/** Exercises the -ns flag: missing ID, missing command, then -help. */
@Test public void testNameserviceOption() throws Exception {
  assertEquals(-1, runTool("-ns"));
  assertOutputContains("Missing nameservice ID");
  assertEquals(-1, runTool("-ns", "ns1"));
  assertOutputContains("Missing command");
  assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
  assertOutputContains("Transitions the service into Active");
}

EqualityVerifier 
/** Failover scoped by -ns succeeds with a fencer configured. */
@Test public void testFailoverWithFencerAndNameservice() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
}

InternalCallVerifier EqualityVerifier 
/** -transitionToActive calls the protocol with a REQUEST_BY_USER source. */
@Test public void testTransitionToActive() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  assertEquals(0, runTool("-transitionToActive", "nn1"));
  Mockito.verify(mockProtocol).transitionToActive(reqInfoCaptor.capture());
  assertEquals(RequestSource.REQUEST_BY_USER, reqInfoCaptor.getValue().getSource());
}

EqualityVerifier 
/** --forcefence without any fencer configured must fail. */
@Test public void testFailoverWithFenceButNoFencer() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}

EqualityVerifier 
/** -transitionToStandby succeeds and calls through to the protocol. */
@Test public void testTransitionToStandby() throws Exception {
  assertEquals(0, runTool("-transitionToStandby", "nn1"));
  Mockito.verify(mockProtocol).transitionToStandby(anyReqInfo());
}

EqualityVerifier 
/** Known namenode IDs resolve; unknown IDs produce a resolution error. */
@Test public void testNamenodeResolution() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  assertEquals(0, runTool("-getServiceState", "nn1"));
  Mockito.verify(mockProtocol).getServiceStatus();
  assertEquals(-1, runTool("-getServiceState", "undefined"));
  assertOutputContains("Unable to determine service address for namenode 'undefined'");
}

EqualityVerifier 
/** An unrecognized trailing argument to -failover must be rejected. */
@Test public void testFailoverWithInvalidFenceArg() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
}

EqualityVerifier 
/** --forcefence may appear before the positional namenode arguments. */
@Test public void testForceFenceOptionListedBeforeArgs() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "--forcefence", "nn1", "nn2"));
}

EqualityVerifier 
/** -help, alone and with a command name, exits 0 and prints usage. */
@Test public void testHelp() throws Exception {
  assertEquals(0, runTool("-help"));
  assertEquals(0, runTool("-help", "transitionToActive"));
  assertOutputContains("Transitions the service into Active");
}

EqualityVerifier 
/** -getServiceState succeeds and calls getServiceStatus on the protocol. */
@Test public void testGetServiceStatus() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  assertEquals(0, runTool("-getServiceState", "nn1"));
  Mockito.verify(mockProtocol).getServiceStatus();
}

EqualityVerifier 
/** --forceactive failover succeeds with a fencer configured. */
@Test public void testFailoverWithForceActive() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
}

EqualityVerifier 
/** A plain failover must fail when no fencer is configured at all. */
@Test public void testFailoverWithNoFencerConfigured() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  assertEquals(-1, runTool("-failover", "nn1", "nn2"));
}

EqualityVerifier 
/** With auto-HA enabled, -failover goes through ZKFC gracefulFailover(). */
@Test public void testFailoverWithAutoHa() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration haConf = getHAConf();
  haConf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  haConf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(haConf);
  assertEquals(0, runTool("-failover", "nn1", "nn2"));
  Mockito.verify(mockZkfcProtocol).gracefulFailover();
}

Class: org.apache.hadoop.hdfs.tools.TestDFSHAAdminMiniCluster

EqualityVerifier 
/** -getServiceState succeeds for standby, active, and safemode namenodes. */
@Test public void testGetServiceState() throws Exception {
  assertEquals(0, runTool("-getServiceState", "nn1"));
  assertEquals(0, runTool("-getServiceState", "nn2"));
  cluster.transitionToActive(0);
  assertEquals(0, runTool("-getServiceState", "nn1"));
  NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
  assertEquals(0, runTool("-getServiceState", "nn1"));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Transitions each namenode to active and back to standby via the CLI. */
@Test public void testStateTransition() throws Exception {
  NameNode firstNn = cluster.getNameNode(0);
  assertTrue(firstNn.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn1"));
  assertFalse(firstNn.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn1"));
  assertTrue(firstNn.isStandbyState());
  NameNode secondNn = cluster.getNameNode(1);
  assertTrue(secondNn.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn2"));
  assertFalse(secondNn.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn2"));
  assertTrue(secondNn.isStandbyState());
}

BooleanVerifier EqualityVerifier HybridVerifier 
/** Failing over to a namenode that is in safe mode must be refused. */
@Test public void testTryFailoverToSafeMode() throws Exception {
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
      TestDFSHAAdmin.getFencerTrueCommand());
  tool.setConf(conf);
  NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
  assertEquals(-1, runTool("-failover", "nn2", "nn1"));
  assertTrue("Bad output: " + errOutput,
      errOutput.contains("is not ready to become active: "
          + "The NameNode is in safemode"));
}

EqualityVerifier 
/** -checkHealth should succeed against both live namenodes. */
@Test public void testCheckHealth() throws Exception {
  assertEquals(0, runTool("-checkHealth", "nn1"));
  assertEquals(0, runTool("-checkHealth", "nn2"));
}

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test failover with various options
 */
@Test public void testFencer() throws Exception {
  // No fencer configured yet, so a plain failover must fail.
  assertEquals(-1,runTool("-failover","nn1","nn2"));
  // Configure a fencer that echoes its substituted variables into tmpFile
  // so we can verify both whether it ran and what arguments it received.
  File tmpFile=File.createTempFile("testFencer",".txt");
  tmpFile.deleteOnExit();
  if (Shell.WINDOWS) {
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
        "shell(echo %target_nameserviceid%.%target_namenodeid% "
            + "%target_port% %dfs_ha_namenode_id% > "
            + tmpFile.getAbsolutePath() + ")");
  } else {
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,
        "shell(echo -n $target_nameserviceid.$target_namenodeid "
            + "$target_port $dfs_ha_namenode_id > "
            + tmpFile.getAbsolutePath() + ")");
  }
  tool.setConf(conf);
  assertEquals(0,runTool("-transitionToActive","nn1"));
  // Graceful failovers (with or without -ns) must not invoke the fencer:
  // the file stays empty.
  assertEquals(0,runTool("-failover","nn1","nn2"));
  assertEquals(0,runTool("-ns","minidfs-ns","-failover","nn2","nn1"));
  assertEquals("",Files.toString(tmpFile,Charsets.UTF_8));
  // --forcefence must run the fencer; check the substituted arguments.
  assertEquals(0,runTool("-failover","nn1","nn2","--forcefence"));
  String fenceCommandOutput=Files.toString(tmpFile,Charsets.UTF_8)
      .replaceAll(" *[\r\n]+","");
  assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",fenceCommandOutput);
  tmpFile.delete();
  // --forceactive alone should not fence (file is not recreated).
  assertEquals(0,runTool("-failover","nn2","nn1","--forceactive"));
  assertFalse(tmpFile.exists());
  // --forcefence with the fencer unset must fail without fencing.
  conf.unset(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
  tool.setConf(conf);
  assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
  assertFalse(tmpFile.exists());
  // --forcefence with a malformed fencer must also fail without fencing.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,"foobar!");
  tool.setConf(conf);
  assertEquals(-1,runTool("-failover","nn1","nn2","--forcefence"));
  assertFalse(tmpFile.exists());
  // A valid fencer with --forcefence listed before the positional args
  // should still succeed.
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY,TestDFSHAAdmin.getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0,runTool("-failover","--forcefence","nn1","nn2"));
}

Class: org.apache.hadoop.hdfs.tools.TestDelegationTokenFetcher

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Call fetch token using http server; verifies the saved credentials file
 * round-trips the token and that renew/cancel reach the FakeRenewer.
 */
@Test public void expectedTokenIsRetrievedFromHttp() throws Exception {
  // FIX: restore the generic type parameters that were stripped — the raw
  // "Token" and malformed "Iterator>" forms did not compile.
  final Token<TokenIdentifier> testToken = new Token<TokenIdentifier>(
      "id".getBytes(), "pwd".getBytes(), FakeRenewer.KIND, new Text("127.0.0.1:1234"));
  WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
  doReturn(testToken).when(fs).getDelegationToken(anyString());
  Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
  DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
  // Read the credentials file back and check the stored token matches.
  Credentials creds = Credentials.readTokenStorageFile(p, conf);
  Iterator<Token<?>> itr = creds.getAllTokens().iterator();
  assertTrue("token not exist error", itr.hasNext());
  Token<?> fetchedToken = itr.next();
  Assert.assertArrayEquals("token wrong identifier error",
      testToken.getIdentifier(), fetchedToken.getIdentifier());
  Assert.assertArrayEquals("token wrong password error",
      testToken.getPassword(), fetchedToken.getPassword());
  // Renew and cancel through the fetcher; FakeRenewer records each token.
  DelegationTokenFetcher.renewTokens(conf, p);
  Assert.assertEquals(testToken, FakeRenewer.getLastRenewed());
  DelegationTokenFetcher.cancelTokens(conf, p);
  Assert.assertEquals(testToken, FakeRenewer.getLastCanceled());
}

Class: org.apache.hadoop.hdfs.tools.TestGetConf

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** getconf -excludeFile should print the configured exclude-file path. */
@Test public void TestGetConfExcludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path confDir = new Path(workingDir,
      System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hostsFile = new Path(confDir, "hosts");
  Path excludeFile = new Path(confDir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);
  String[] args = {"-excludeFile"};
  String ret = runTool(conf, args, true);
  assertEquals(excludeFile.toUri().getPath(), ret.trim());
  cleanupFile(localFileSys, excludeFile.getParent());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** getconf -confKey should print the trimmed value followed by a newline. */
@Test(timeout=10000) public void testGetSpecificKey() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  // The value is set with surrounding whitespace to verify trimming.
  conf.set("mykey", " myval ");
  String[] args = {"-confKey", "mykey"};
  String toolResult = runTool(conf, args, true);
  assertEquals(String.format("myval%n"), toolResult);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** getconf -includeFile should print the configured hosts-file path. */
@Test public void TestGetConfIncludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path confDir = new Path(workingDir,
      System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hostsFile = new Path(confDir, "hosts");
  Path excludeFile = new Path(confDir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);
  String[] args = {"-includeFile"};
  String ret = runTool(conf, args, true);
  assertEquals(hostsFile.toUri().getPath(), ret.trim());
  cleanupFile(localFileSys, excludeFile.getParent());
}

Class: org.apache.hadoop.hdfs.tools.offlineEditsViewer.TestOfflineEditsViewer

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Ensures the offline edits viewer fails on a truncated edits file unless
 * recovery mode is enabled, and that the recovered XML round-trips back to
 * binary and to XML again without change.
 *
 * Fix: the FileOutputStream used to truncate the edits file was only closed
 * after all assertions, so a failing assertion leaked the file handle; it is
 * now closed in a finally block.
 */
@Test
public void testRecoveryMode() throws IOException {
  String edits = nnHelper.generateEdits();
  // Corrupt the edits log by chopping off the final five bytes.
  FileOutputStream os = new FileOutputStream(edits, true);
  try {
    FileChannel editsFile = os.getChannel();
    editsFile.truncate(editsFile.size() - 5);

    String editsParsedXml =
        folder.newFile("editsRecoveredParsed.xml").getAbsolutePath();
    String editsReparsed =
        folder.newFile("editsRecoveredReparsed").getAbsolutePath();
    String editsParsedXml2 =
        folder.newFile("editsRecoveredParsed2.xml").getAbsolutePath();

    // Without recovery the conversion must fail (-1); with recovery (true)
    // it must succeed.
    assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
    assertEquals(0, runOev(edits, editsParsedXml, "xml", true));
    // Round trip: recovered XML -> binary -> XML must be stable.
    assertEquals(0, runOev(editsParsedXml, editsReparsed, "binary", false));
    assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
    assertTrue("Test round trip",
        filesEqualIgnoreTrailingZeros(editsParsedXml, editsParsedXml2));
  } finally {
    // Close even when an assertion above fails so the handle is not leaked.
    os.close();
  }
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips generated edits: binary -> XML -> binary, checks every op code
// is present, and compares the reparsed binary against the original edits.
/** * Test the OfflineEditsViewer */ @Test public void testGenerated() throws IOException { String edits=nnHelper.generateEdits(); LOG.info("Generated edits=" + edits); String editsParsedXml=folder.newFile("editsParsed.xml").getAbsolutePath(); String editsReparsed=folder.newFile("editsParsed").getAbsolutePath(); assertEquals(0,runOev(edits,editsParsedXml,"xml",false)); assertEquals(0,runOev(editsParsedXml,editsReparsed,"binary",false)); assertTrue("Edits " + edits + " should have all op codes",hasAllOpCodes(edits)); LOG.info("Comparing generated file " + editsReparsed + " with reference file "+ edits); assertTrue("Generated edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(edits,editsReparsed)); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips the checked-in reference edits file (test.cache.data) through
// XML and back to binary, and compares both XML and binary against references.
@Test public void testStored() throws IOException { final String cacheDir=System.getProperty("test.cache.data","build/test/cache"); String editsStored=cacheDir + "/editsStored"; String editsStoredParsedXml=cacheDir + "/editsStoredParsed.xml"; String editsStoredReparsed=cacheDir + "/editsStoredReparsed"; String editsStoredXml=cacheDir + "/editsStored.xml"; assertEquals(0,runOev(editsStored,editsStoredParsedXml,"xml",false)); assertEquals(0,runOev(editsStoredParsedXml,editsStoredReparsed,"binary",false)); assertTrue("Edits " + editsStored + " should have all op codes",hasAllOpCodes(editsStored)); assertTrue("Reference XML edits and parsed to XML should be same",FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),new File(editsStoredParsedXml),"UTF-8")); assertTrue("Reference edits and reparsed (bin to XML to bin) should be same",filesEqualIgnoreTrailingZeros(editsStored,editsStoredReparsed)); }

Class: org.apache.hadoop.hdfs.tools.offlineImageViewer.TestOfflineImageViewer

APIUtilityVerifier EqualityVerifier 
/**
 * Runs the FileDistribution processor with explicit -maxSize/-step options
 * against the saved fsimage and expects a clean (0) exit status.
 */
@Test
public void testFileDistributionCalculatorWithOptions() throws IOException {
  final String[] cliArgs = {
      "-i", originalFsimage.getAbsolutePath(),
      "-o", "-",
      "-p", "FileDistribution",
      "-maxSize", "512",
      "-step", "8"};
  final int status = OfflineImageViewerPB.run(cliArgs);
  assertEquals(0, status);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// End-to-end check of WebImageViewer's WebHDFS-compatible endpoint:
// listStatus/getFileStatus over webhdfs:// must match the written fixture
// files, invalid paths return 404, unknown ops 400, and POST is rejected
// with 405. The viewer is shut down in finally even on assertion failure.
@Test public void testWebImageViewer() throws IOException, InterruptedException, URISyntaxException { WebImageViewer viewer=new WebImageViewer(NetUtils.createSocketAddr("localhost:0")); try { viewer.initServer(originalFsimage.getAbsolutePath()); int port=viewer.getPort(); URI uri=new URI("webhdfs://localhost:" + String.valueOf(port)); Configuration conf=new Configuration(); WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf); FileStatus[] statuses=webhdfs.listStatus(new Path("/")); assertEquals(NUM_DIRS + 2,statuses.length); statuses=webhdfs.listStatus(new Path("/dir0")); assertEquals(FILES_PER_DIR,statuses.length); FileStatus status=webhdfs.listStatus(new Path("/dir0/file0"))[0]; FileStatus expected=writtenFiles.get("/dir0/file0"); compareFile(expected,status); statuses=webhdfs.listStatus(new Path("/emptydir")); assertEquals(0,statuses.length); URL url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=LISTSTATUS"); verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url); url=new URL("http://localhost:" + port + "/webhdfs/v1?op=LISTSTATUS"); verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url); status=webhdfs.getFileStatus(new Path("/dir0/file0")); compareFile(expected,status); url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=GETFILESTATUS"); verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND,url); url=new URL("http://localhost:" + port + "/webhdfs/v1/?op=INVALID"); verifyHttpResponseCode(HttpURLConnection.HTTP_BAD_REQUEST,url); url=new URL("http://localhost:" + port + "/webhdfs/v1/?op=LISTSTATUS"); HttpURLConnection connection=(HttpURLConnection)url.openConnection(); connection.setRequestMethod("POST"); connection.connect(); assertEquals(HttpURLConnection.HTTP_BAD_METHOD,connection.getResponseCode()); } finally { viewer.shutdown(); } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs FileDistributionCalculator over the saved fsimage and scrapes its
 * text report for totalFiles, totalDirectories and maxFileSize, comparing
 * each against the known fixture contents.
 *
 * Fix: the max-by-length comparator used a raw {@code Comparator} (whose
 * {@code compare(Object,Object)} the FileStatus-typed method cannot
 * override) and a hand-rolled ternary comparison; it is now a typed
 * {@code Comparator<FileStatus>} using the overflow-safe Long.compare.
 */
@Test
public void testFileDistributionCalculator() throws IOException {
  StringWriter output = new StringWriter();
  PrintWriter o = new PrintWriter(output);
  new FileDistributionCalculator(new Configuration(), 0, 0, o)
      .visit(new RandomAccessFile(originalFsimage, "r"));
  o.close();

  Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
  Matcher matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalFiles = Integer.parseInt(matcher.group(1));
  assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);

  p = Pattern.compile("totalDirectories = (\\d+)\n");
  matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalDirs = Integer.parseInt(matcher.group(1));
  // NUM_DIRS plus root, /emptydir and one more fixture dir — see setup.
  assertEquals(NUM_DIRS + 3, totalDirs);

  // Largest written file; Long.compare avoids subtraction overflow.
  FileStatus maxFile = Collections.max(writtenFiles.values(),
      new Comparator<FileStatus>() {
        @Override
        public int compare(FileStatus first, FileStatus second) {
          return Long.compare(first.getLen(), second.getLen());
        }
      });
  p = Pattern.compile("maxFileSize = (\\d+)\n");
  matcher = p.matcher(output.getBuffer());
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}

Class: org.apache.hadoop.hdfs.tools.offlineImageViewer.TestOfflineImageViewerForAcl

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Checks GETACLSTATUS through WebImageViewer's WebHDFS endpoint: each
// fixture path's AclStatus must equal what was written, and an invalid
// path returns 404. Viewer is shut down in finally.
@Test public void testWebImageViewerForAcl() throws IOException, InterruptedException, URISyntaxException { WebImageViewer viewer=new WebImageViewer(NetUtils.createSocketAddr("localhost:0")); try { viewer.initServer(originalFsimage.getAbsolutePath()); int port=viewer.getPort(); URI uri=new URI("webhdfs://localhost:" + String.valueOf(port)); Configuration conf=new Configuration(); WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf); AclStatus acl=webhdfs.getAclStatus(new Path("/dirWithNoAcl")); assertEquals(writtenAcls.get("/dirWithNoAcl"),acl); acl=webhdfs.getAclStatus(new Path("/dirWithDefaultAcl")); assertEquals(writtenAcls.get("/dirWithDefaultAcl"),acl); acl=webhdfs.getAclStatus(new Path("/noAcl")); assertEquals(writtenAcls.get("/noAcl"),acl); acl=webhdfs.getAclStatus(new Path("/withAcl")); assertEquals(writtenAcls.get("/withAcl"),acl); acl=webhdfs.getAclStatus(new Path("/withSeveralAcls")); assertEquals(writtenAcls.get("/withSeveralAcls"),acl); URL url=new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=GETACLSTATUS"); HttpURLConnection connection=(HttpURLConnection)url.openConnection(); connection.setRequestMethod("GET"); connection.connect(); assertEquals(HttpURLConnection.HTTP_NOT_FOUND,connection.getResponseCode()); } finally { viewer.shutdown(); } }

Class: org.apache.hadoop.hdfs.util.TestAtomicFileOutputStream

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes through AtomicFileOutputStream over an existing (empty) file:
// the destination stays empty until close(), after which it atomically
// holds the new contents.
// NOTE(review): the javadoc below says "no existing file" but the test
// actually covers overwriting an existing file — consider correcting it.
/** * Test case where there is no existing file */ @Test public void testOverwriteFile() throws IOException { assertTrue("Creating empty dst file",DST_FILE.createNewFile()); OutputStream fos=new AtomicFileOutputStream(DST_FILE); assertTrue("Empty file still exists",DST_FILE.exists()); fos.write(TEST_STRING.getBytes()); fos.flush(); assertEquals("",DFSTestUtil.readFile(DST_FILE)); fos.close(); String readBackData=DFSTestUtil.readFile(DST_FILE); assertEquals(TEST_STRING,readBackData); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Writes through AtomicFileOutputStream with no pre-existing destination:
// the file must not appear until close(), then contain exactly TEST_STRING.
/** * Test case where there is no existing file */ @Test public void testWriteNewFile() throws IOException { OutputStream fos=new AtomicFileOutputStream(DST_FILE); assertFalse(DST_FILE.exists()); fos.write(TEST_STRING.getBytes()); fos.flush(); assertFalse(DST_FILE.exists()); fos.close(); assertTrue(DST_FILE.exists()); String readBackData=DFSTestUtil.readFile(DST_FILE); assertEquals(TEST_STRING,readBackData); }

UtilityVerifier EqualityVerifier HybridVerifier 
// A failing flush at close() must leave the pre-existing destination file
// untouched and remove the temporary file (the directory listing afterwards
// contains only DST_FILE).
/** * Test case where the flush() fails at close time - make sure * that we clean up after ourselves and don't touch any * existing file at the destination */ @Test public void testFailToFlush() throws IOException { FileOutputStream fos=new FileOutputStream(DST_FILE); fos.write(TEST_STRING_2.getBytes()); fos.close(); OutputStream failingStream=createFailingStream(); failingStream.write(TEST_STRING.getBytes()); try { failingStream.close(); fail("Close didn't throw exception"); } catch ( IOException ioe) { } assertEquals(TEST_STRING_2,DFSTestUtil.readFile(DST_FILE)); assertEquals("Temporary file should have been cleaned up",DST_FILE.getName(),Joiner.on(",").join(TEST_DIR.list())); }

Class: org.apache.hadoop.hdfs.util.TestBestEffortLongFile

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A zero-length backing file cannot hold a long, so get() must fall back
 * to the default supplied at construction time.
 */
@Test
public void testTruncatedFileReturnsDefault() throws IOException {
  assertTrue(FILE.createNewFile());
  assertEquals(0, FILE.length());
  BestEffortLongFile longFile = new BestEffortLongFile(FILE, 12345L);
  try {
    assertEquals(12345L, longFile.get());
  } finally {
    longFile.close();
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Repeatedly set()s random longs and verifies each value is visible both
// through the live instance and through a fresh BestEffortLongFile opened
// on the same path (i.e. the value was durably persisted). Streams are
// closed via IOUtils.closeStream in finally blocks.
@Test public void testGetSet() throws IOException { BestEffortLongFile f=new BestEffortLongFile(FILE,12345L); try { assertEquals(12345L,f.get()); assertTrue(FILE.exists()); Random r=new Random(); for (int i=0; i < 100; i++) { long newVal=r.nextLong(); f.set(newVal); assertEquals(newVal,f.get()); BestEffortLongFile f2=new BestEffortLongFile(FILE,999L); try { assertEquals(newVal,f2.get()); } finally { IOUtils.closeStream(f2); } } } finally { IOUtils.closeStream(f); } }

Class: org.apache.hadoop.hdfs.util.TestChunkedArrayList

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Sanity checks for add/size/chunking on a freshly created list. */
@Test
public void testBasics() {
  final int numElems = 100000;
  ChunkedArrayList l = new ChunkedArrayList();
  assertTrue(l.isEmpty());
  for (int i = 0; i < numElems; i++) {
    l.add(i);
  }
  assertFalse(l.isEmpty());
  assertEquals(numElems, l.size());
  // 100k entries must span well over ten chunks of at most 8192 elements.
  assertTrue(l.getNumChunks() > 10);
  assertEquals(8192, l.getMaxChunkSize());
}

EqualityVerifier 
// Iteration must yield elements in insertion order (0..29999).
// NOTE(review): ChunkedArrayList is used as a raw type here; presumably it
// is generic (ChunkedArrayList<Integer>) — confirm and add the type argument.
@Test public void testIterator(){ ChunkedArrayList l=new ChunkedArrayList(); for (int i=0; i < 30000; i++) { l.add(i); } int i=0; for ( int fromList : l) { assertEquals(i,fromList); i++; } }

Class: org.apache.hadoop.hdfs.util.TestExactSizeInputStream

UtilityVerifier EqualityVerifier HybridVerifier 
/** Array reads past the real data must raise EOFException. */
@Test
public void testReadArrayNotEnough() throws IOException {
  ExactSizeInputStream stream = new ExactSizeInputStream(byteStream("he"), 5);
  byte[] dest = new byte[10];
  // Only two bytes actually exist despite the declared size of 5.
  assertEquals(2, stream.read(dest, 0, 5));
  try {
    stream.read(dest, 2, 3);
    fail("Read buf when should be out of data");
  } catch (EOFException e) {
    // expected: the underlying data ran out before the declared length
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/** Skipping past the real data must raise EOFException. */
@Test
public void testSkipNotEnough() throws IOException {
  ExactSizeInputStream stream = new ExactSizeInputStream(byteStream("he"), 5);
  // Only two of the requested three bytes can actually be skipped.
  assertEquals(2, stream.skip(3));
  try {
    stream.skip(1);
    fail("Skip when should be out of data");
  } catch (EOFException e) {
    // expected
  }
}

InternalCallVerifier EqualityVerifier 
/** skip() is capped by the declared size, then reports 0. */
@Test
public void testBasicsSkip() throws IOException {
  ExactSizeInputStream stream =
      new ExactSizeInputStream(byteStream("hello"), 3);
  assertEquals(3, stream.available());
  assertEquals(2, stream.skip(2));
  assertEquals(1, stream.skip(2)); // only one byte of the declared 3 remains
  assertEquals(0, stream.skip(2)); // exhausted
}

InternalCallVerifier EqualityVerifier 
/** Array reads honor the declared size, then report EOF as -1. */
@Test
public void testBasicsReadArray() throws IOException {
  ExactSizeInputStream stream =
      new ExactSizeInputStream(byteStream("hello"), 3);
  assertEquals(3, stream.available());
  byte[] dest = new byte[10];
  assertEquals(2, stream.read(dest, 0, 2));
  assertEquals('h', dest[0]);
  assertEquals('e', dest[1]);
  assertEquals(1, stream.read(dest, 0, 2)); // last byte of the declared 3
  assertEquals('l', dest[0]);
  assertEquals(-1, stream.read(dest, 0, 2));
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/** Single-byte reads past the real data must raise EOFException. */
@Test
public void testReadNotEnough() throws IOException {
  ExactSizeInputStream stream = new ExactSizeInputStream(byteStream("he"), 5);
  assertEquals(2, stream.available());
  assertEquals((int) 'h', stream.read());
  assertEquals((int) 'e', stream.read());
  try {
    stream.read();
    fail("Read when should be out of data");
  } catch (EOFException e) {
    // expected
  }
}

InternalCallVerifier EqualityVerifier 
/** Single-byte reads stop at the declared size; -1 then 0 available. */
@Test
public void testBasicsReadSingle() throws IOException {
  ExactSizeInputStream stream =
      new ExactSizeInputStream(byteStream("hello"), 3);
  assertEquals(3, stream.available());
  assertEquals((int) 'h', stream.read());
  assertEquals((int) 'e', stream.read());
  assertEquals((int) 'l', stream.read());
  assertEquals(-1, stream.read());
  assertEquals(0, stream.available());
}

Class: org.apache.hadoop.hdfs.util.TestLightWeightHashSet

InternalCallVerifier EqualityVerifier 
// Exercises LightWeightHashSet capacity management: minimum capacity is
// enforced, requested capacities round up to powers of two, adding past the
// max load factor doubles capacity, and removing below the min load factor
// shrinks it. The expected-capacity loop mirrors the set's own growth rule.
@Test public void testCapacity(){ LOG.info("Test capacity"); float maxF=LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR; float minF=LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR; set=new LightWeightHashSet(1,maxF,minF); assertEquals(LightWeightHashSet.MINIMUM_CAPACITY,set.getCapacity()); set=new LightWeightHashSet(30,maxF,minF); assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY,32),set.getCapacity()); set=new LightWeightHashSet(64,maxF,minF); assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY,64),set.getCapacity()); set.addAll(list); int expCap=LightWeightHashSet.MINIMUM_CAPACITY; while (expCap < NUM && maxF * expCap < NUM) expCap<<=1; assertEquals(expCap,set.getCapacity()); set.clear(); set.addAll(list); int toRemove=set.size() - (int)(set.getCapacity() * minF) + 1; for (int i=0; i < toRemove; i++) { set.remove(list.get(i)); } assertEquals(Math.max(LightWeightHashSet.MINIMUM_CAPACITY,expCap / 2),set.getCapacity()); LOG.info("Test capacity - DONE"); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// pollN(): polling 0 removes nothing, polling 10 removes exactly 10 members,
// and over-asking (1000) drains the remaining NUM-10 and leaves the set empty.
@Test public void testPollNMulti(){ LOG.info("Test pollN multi"); set.addAll(list); List poll=set.pollN(0); assertEquals(0,poll.size()); for ( Integer i : list) { assertTrue(set.contains(i)); } poll=set.pollN(10); assertEquals(10,poll.size()); for ( Integer i : poll) { assertTrue(list.contains(i)); assertFalse(set.contains(i)); } poll=set.pollN(1000); assertEquals(NUM - 10,poll.size()); for ( Integer i : poll) { assertTrue(list.contains(i)); } assertTrue(set.isEmpty()); assertEquals(0,set.size()); LOG.info("Test pollN multi - DONE"); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Add then remove one element; the set must empty out yet stay usable. */
@Test
public void testRemoveOne() {
  LOG.info("Test remove one");
  assertTrue(set.add(list.get(0)));
  assertEquals(1, set.size());
  assertTrue(set.remove(list.get(0)));
  assertEquals(0, set.size());
  Iterator it = set.iterator();
  assertFalse(it.hasNext());
  // Re-adding after removal must work as if the set were fresh.
  assertTrue(set.add(list.get(0)));
  assertEquals(1, set.size());
  it = set.iterator();
  assertTrue(it.hasNext());
  LOG.info("Test remove one - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Exercises bulk operations: addAll/removeAll of the full list, partial
// removeAll of the first 10 elements, containsAll on the removed vs. the
// remaining sublists, and toArray (both typed and Object[]) contents/sizes.
@Test public void testOther(){ LOG.info("Test other"); assertTrue(set.addAll(list)); assertTrue(set.removeAll(list)); assertTrue(set.isEmpty()); List sub=new LinkedList(); for (int i=0; i < 10; i++) { sub.add(list.get(i)); } assertTrue(set.addAll(list)); assertTrue(set.removeAll(sub)); assertFalse(set.isEmpty()); assertEquals(NUM - 10,set.size()); for ( Integer i : sub) { assertFalse(set.contains(i)); } assertFalse(set.containsAll(sub)); List sub2=new LinkedList(); for (int i=10; i < NUM; i++) { sub2.add(list.get(i)); } assertTrue(set.containsAll(sub2)); Integer[] array=set.toArray(new Integer[0]); assertEquals(NUM - 10,array.length); for (int i=0; i < array.length; i++) { assertTrue(sub2.contains(array[i])); } assertEquals(NUM - 10,set.size()); Object[] array2=set.toArray(); assertEquals(NUM - 10,array2.length); for (int i=0; i < array2.length; i++) { assertTrue(sub2.contains(array2[i])); } LOG.info("Test other - DONE"); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// pollToArray(): a 10-slot array drains 10 members, an NUM-slot array drains
// the rest, an exactly-sized array drains everything in one call, and a
// zero-length array polls nothing (set left intact).
@Test public void testPollNMultiArray(){ LOG.info("Test pollN multi array"); set.addAll(list); Integer[] poll=new Integer[10]; poll=set.pollToArray(poll); assertEquals(10,poll.length); for ( Integer i : poll) { assertTrue(list.contains(i)); assertFalse(set.contains(i)); } poll=new Integer[NUM]; poll=set.pollToArray(poll); assertEquals(NUM - 10,poll.length); for (int i=0; i < NUM - 10; i++) { assertTrue(list.contains(poll[i])); } assertTrue(set.isEmpty()); assertEquals(0,set.size()); set.addAll(list); poll=new Integer[NUM]; poll=set.pollToArray(poll); assertTrue(set.isEmpty()); assertEquals(0,set.size()); assertEquals(NUM,poll.length); for (int i=0; i < NUM; i++) { assertTrue(list.contains(poll[i])); } set.addAll(list); poll=new Integer[0]; poll=set.pollToArray(poll); for (int i=0; i < NUM; i++) { assertTrue(set.contains(list.get(i))); } assertEquals(0,poll.length); LOG.info("Test pollN multi array- DONE"); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// pollAll() must drain every element: the set ends empty, no original
// element remains contained, and everything polled came from the source list.
@Test public void testPollAll(){ LOG.info("Test poll all"); for ( Integer i : list) { assertTrue(set.add(i)); } List poll=set.pollAll(); assertEquals(0,set.size()); assertTrue(set.isEmpty()); for (int i=0; i < NUM; i++) { assertFalse(set.contains(list.get(i))); } for ( Integer i : poll) { assertTrue(list.contains(i)); } Iterator iter=set.iterator(); assertFalse(iter.hasNext()); LOG.info("Test poll all - DONE"); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A freshly constructed set is empty: no elements, size 0, isEmpty true. */
@Test
public void testEmptyBasic() {
  LOG.info("Test empty basic");
  Iterator it = set.iterator();
  assertFalse(it.hasNext()); // nothing to iterate over
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  LOG.info("Test empty - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A single added element is reported by size/isEmpty and the iterator. */
@Test
public void testOneElementBasic() {
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertEquals(1, set.size());
  assertFalse(set.isEmpty());
  // The iterator must yield exactly that one element and then stop.
  Iterator it = set.iterator();
  assertTrue(it.hasNext());
  assertEquals(list.get(0), it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** clear() must remove every element and leave a usable empty set. */
@Test
public void testClear() {
  LOG.info("Test clear");
  set.addAll(list);
  assertEquals(NUM, set.size());
  assertFalse(set.isEmpty());
  set.clear();
  // Everything must be gone after clear().
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  Iterator it = set.iterator();
  assertFalse(it.hasNext());
  LOG.info("Test clear - DONE");
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Adds every list element (each add returns true), re-adding returns false
// (no duplicates), containment holds throughout, and iteration visits
// exactly list.size() non-null members (order unspecified for a hash set).
@Test public void testMultiBasic(){ LOG.info("Test multi element basic"); for ( Integer i : list) { assertTrue(set.add(i)); } assertEquals(list.size(),set.size()); for ( Integer i : list) { assertTrue(set.contains(i)); } for ( Integer i : list) { assertFalse(set.add(i)); } for ( Integer i : list) { assertTrue(set.contains(i)); } Iterator iter=set.iterator(); int num=0; while (iter.hasNext()) { Integer next=iter.next(); assertNotNull(next); assertTrue(list.contains(next)); num++; } assertEquals(list.size(),num); LOG.info("Test multi element basic - DONE"); }

Class: org.apache.hadoop.hdfs.util.TestLightWeightLinkedSet

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// pollFirst() must return elements in insertion order: drains the first
// half, verifies membership of both halves, iterates the remainder in order,
// re-adds the first half (appended at the tail), then drains everything in
// the resulting order until the set is empty.
@Test public void testPollMulti(){ LOG.info("Test poll multi"); for ( Integer i : list) { assertTrue(set.add(i)); } for (int i=0; i < NUM / 2; i++) { assertEquals(list.get(i),set.pollFirst()); } assertEquals(NUM / 2,set.size()); for (int i=0; i < NUM / 2; i++) { assertFalse(set.contains(list.get(i))); } for (int i=NUM / 2; i < NUM; i++) { assertTrue(set.contains(list.get(i))); } Iterator iter=set.iterator(); int num=NUM / 2; while (iter.hasNext()) { assertEquals(list.get(num++),iter.next()); } assertEquals(num,NUM); for (int i=0; i < NUM / 2; i++) { assertTrue(set.add(list.get(i))); } assertEquals(NUM,set.size()); for (int i=NUM / 2; i < NUM; i++) { assertEquals(list.get(i),set.pollFirst()); } for (int i=0; i < NUM / 2; i++) { assertEquals(list.get(i),set.pollFirst()); } assertEquals(0,set.size()); assertTrue(set.isEmpty()); LOG.info("Test poll multi - DONE"); }

InternalCallVerifier EqualityVerifier 
/** resetBookmark() must rewind the bookmark iterator to the list head. */
@Test(timeout=60000)
public void testResetBookmarkPlacesBookmarkAtHead() {
  set.addAll(list);
  Iterator mark = set.getBookmark();
  // Advance the bookmark halfway through the set.
  final int numAdvance = set.size() / 2;
  for (int i = 0; i < numAdvance; i++) {
    mark.next();
  }
  assertEquals(mark.next(), list.get(numAdvance));
  // After a reset, the bookmark must start from the head again.
  set.resetBookmark();
  mark = set.getBookmark();
  assertEquals(mark.next(), list.get(0));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Adding to a previously empty set must place the bookmark at the head. */
@Test(timeout=60000)
public void testBookmarkSetToHeadOnAddToEmpty() {
  LOG.info("Test bookmark is set after adding to previously empty set.");
  Iterator mark = set.getBookmark();
  assertFalse(mark.hasNext()); // empty set: nothing beyond the bookmark
  set.add(list.get(0));
  set.add(list.get(1));
  // The bookmark must now traverse both elements in insertion order.
  mark = set.getBookmark();
  assertTrue(mark.hasNext());
  assertEquals(mark.next(), list.get(0));
  assertEquals(mark.next(), list.get(1));
  assertFalse(mark.hasNext());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A second getBookmark() call resumes where the previous one stopped. */
@Test(timeout=60000)
public void testGetBookmarkReturnsBookmarkIterator() {
  LOG.info("Test getBookmark returns proper iterator");
  assertTrue(set.addAll(list));
  Iterator mark = set.getBookmark();
  assertEquals(mark.next(), list.get(0));
  // Consume roughly the first half of the set through the bookmark.
  final int halfway = list.size() / 2;
  for (int i = 1; i < halfway; i++) {
    mark.next();
  }
  Iterator resumed = set.getBookmark();
  assertEquals(resumed.next(), list.get(halfway));
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Removes the first half of the elements, checks containment of both halves,
// then iterates the remaining half verifying insertion order is preserved.
@Test public void testRemoveMulti(){ LOG.info("Test remove multi"); for ( Integer i : list) { assertTrue(set.add(i)); } for (int i=0; i < NUM / 2; i++) { assertTrue(set.remove(list.get(i))); } for (int i=0; i < NUM / 2; i++) { assertFalse(set.contains(list.get(i))); } for (int i=NUM / 2; i < NUM; i++) { assertTrue(set.contains(list.get(i))); } Iterator iter=set.iterator(); int num=NUM / 2; while (iter.hasNext()) { assertEquals(list.get(num++),iter.next()); } assertEquals(num,NUM); LOG.info("Test remove multi - DONE"); }

InternalCallVerifier EqualityVerifier 
/** pollN asked for more elements than exist returns just the one member. */
@Test
public void testPollNOne() {
  LOG.info("Test pollN one");
  set.add(list.get(0));
  List polled = set.pollN(10);
  assertEquals(1, polled.size());
  assertEquals(list.get(0), polled.get(0));
  LOG.info("Test pollN one - DONE");
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Empty set: iterator, size, isEmpty and all poll variants report nothing. */
@Test
public void testEmptyBasic() {
  LOG.info("Test empty basic");
  Iterator it = set.iterator();
  assertFalse(it.hasNext());
  assertEquals(0, set.size());
  assertTrue(set.isEmpty());
  // All poll variants must come back empty on an empty set.
  assertNull(set.pollFirst());
  assertEquals(0, set.pollAll().size());
  assertEquals(0, set.pollN(10).size());
  LOG.info("Test empty - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Linked-set variant of testMultiBasic: adds succeed once, duplicates are
// rejected, containment holds, and — unlike the hash set — iteration must
// follow exact insertion order (compared index by index).
@Test public void testMultiBasic(){ LOG.info("Test multi element basic"); for ( Integer i : list) { assertTrue(set.add(i)); } assertEquals(list.size(),set.size()); for ( Integer i : list) { assertTrue(set.contains(i)); } for ( Integer i : list) { assertFalse(set.add(i)); } for ( Integer i : list) { assertTrue(set.contains(i)); } Iterator iter=set.iterator(); int num=0; while (iter.hasNext()) { assertEquals(list.get(num++),iter.next()); } assertEquals(list.size(),num); LOG.info("Test multi element basic - DONE"); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** Removing the element the bookmark points at must advance the bookmark. */
@Test(timeout=60000)
public void testBookmarkAdvancesOnRemoveOfSameElement() {
  LOG.info("Test that the bookmark advances if we remove its element.");
  assertTrue(set.add(list.get(0)));
  assertTrue(set.add(list.get(1)));
  assertTrue(set.add(list.get(2)));
  Iterator mark = set.getBookmark();
  assertEquals(mark.next(), list.get(0)); // bookmark now at element 1
  // Deleting the bookmarked element pushes the bookmark to its successor.
  set.remove(list.get(1));
  mark = set.getBookmark();
  assertEquals(mark.next(), list.get(2));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** pollFirst() yields the sole element, then null once the set is empty. */
@Test
public void testPollOneElement() {
  LOG.info("Test poll one element");
  set.add(list.get(0));
  assertEquals(list.get(0), set.pollFirst());
  assertNull(set.pollFirst());
  LOG.info("Test poll one element - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Drains the entire linked set via repeated pollFirst() until null, then
// verifies it is empty, contains none of the originals, and iterates nothing.
@Test public void testPollAll(){ LOG.info("Test poll all"); for ( Integer i : list) { assertTrue(set.add(i)); } while (set.pollFirst() != null) ; assertEquals(0,set.size()); assertTrue(set.isEmpty()); for (int i=0; i < NUM; i++) { assertFalse(set.contains(list.get(i))); } Iterator iter=set.iterator(); assertFalse(iter.hasNext()); LOG.info("Test poll all - DONE"); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Linked-set variant of testRemoveOne: after add+remove the set is empty and
// every poll variant reports nothing; re-adding then works normally.
@Test public void testRemoveOne(){ LOG.info("Test remove one"); assertTrue(set.add(list.get(0))); assertEquals(1,set.size()); assertTrue(set.remove(list.get(0))); assertEquals(0,set.size()); Iterator iter=set.iterator(); assertFalse(iter.hasNext()); assertNull(set.pollFirst()); assertEquals(0,set.pollAll().size()); assertEquals(0,set.pollN(10).size()); assertTrue(set.add(list.get(0))); assertEquals(1,set.size()); iter=set.iterator(); assertTrue(iter.hasNext()); LOG.info("Test remove one - DONE"); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// clear() on the linked set must also invalidate an in-progress bookmark:
// a bookmark advanced past the midpoint has nothing left after clear, and
// all poll variants and iteration report an empty set.
@Test public void testClear(){ LOG.info("Test clear"); set.addAll(list); assertEquals(NUM,set.size()); assertFalse(set.isEmpty()); Iterator bkmrkIt=set.getBookmark(); for (int i=0; i < set.size() / 2 + 1; i++) { bkmrkIt.next(); } assertTrue(bkmrkIt.hasNext()); set.clear(); assertEquals(0,set.size()); assertTrue(set.isEmpty()); bkmrkIt=set.getBookmark(); assertFalse(bkmrkIt.hasNext()); assertEquals(0,set.pollAll().size()); assertEquals(0,set.pollN(10).size()); assertNull(set.pollFirst()); Iterator iter=set.iterator(); assertFalse(iter.hasNext()); LOG.info("Test clear - DONE"); }

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// toArray on the linked set: both the typed Integer[] and the Object[]
// overloads must return all NUM members without modifying the set.
@Test public void testOther(){ LOG.info("Test other"); assertTrue(set.addAll(list)); Integer[] array=set.toArray(new Integer[0]); assertEquals(NUM,array.length); for (int i=0; i < array.length; i++) { assertTrue(list.contains(array[i])); } assertEquals(NUM,set.size()); Object[] array2=set.toArray(); assertEquals(NUM,array2.length); for (int i=0; i < array2.length; i++) { assertTrue(list.contains(array2[i])); } LOG.info("Test capacity - DONE"); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A single added element is reported by size/isEmpty and the iterator. */
@Test
public void testOneElementBasic() {
  LOG.info("Test one element basic");
  set.add(list.get(0));
  assertEquals(1, set.size());
  assertFalse(set.isEmpty());
  // The iterator must yield exactly that one element and then stop.
  Iterator it = set.iterator();
  assertTrue(it.hasNext());
  assertEquals(list.get(0), it.next());
  assertFalse(it.hasNext());
  LOG.info("Test one element basic - DONE");
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Linked-set pollN preserves insertion order: the first 10 polled match
// list[0..9], an oversized pollN(1000) drains the remaining NUM-10 in order,
// and the set ends empty.
@Test public void testPollNMulti(){ LOG.info("Test pollN multi"); set.addAll(list); List l=set.pollN(10); assertEquals(10,l.size()); for (int i=0; i < 10; i++) { assertEquals(list.get(i),l.get(i)); } l=set.pollN(1000); assertEquals(NUM - 10,l.size()); for (int i=10; i < NUM; i++) { assertEquals(list.get(i),l.get(i - 10)); } assertTrue(set.isEmpty()); assertEquals(0,set.size()); LOG.info("Test pollN multi - DONE"); }

Class: org.apache.hadoop.hdfs.util.TestMD5FileUtils

APIUtilityVerifier EqualityVerifier 
/** The digest computed from TEST_FILE must equal the reference TEST_MD5. */
@Test
public void testComputeMd5ForFile() throws Exception {
  final MD5Hash digest = MD5FileUtils.computeMd5ForFile(TEST_FILE);
  assertEquals(TEST_MD5, digest);
}

Class: org.apache.hadoop.hdfs.web.TestAuthFilter

InternalCallVerifier EqualityVerifier 
/**
 * With no explicit filter settings, anonymous access must default to
 * allowed ("true").
 *
 * Fix: the config map was declared as a raw HashMap/Map; it is now typed
 * as Map&lt;String, String&gt; to match the string key/value usage.
 */
@Test
public void testGetSimpleAuthDefaultConfiguration() throws ServletException {
  AuthFilter filter = new AuthFilter();
  Map<String, String> m = new HashMap<String, String>();
  FilterConfig config = new DummyFilterConfig(m);
  Properties p = filter.getConfiguration("random", config);
  Assert.assertEquals("true",
      p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}

InternalCallVerifier EqualityVerifier 
/**
 * Kerberos principal/keytab settings must be surfaced through the filter's
 * configuration, and anonymous access still defaults to allowed.
 *
 * Fix: the config map was declared as a raw HashMap/Map; it is now typed
 * as Map&lt;String, String&gt; to match the string key/value usage.
 */
@Test
public void testGetConfiguration() throws ServletException {
  AuthFilter filter = new AuthFilter();
  Map<String, String> m = new HashMap<String, String>();
  m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
      "xyz/thehost@REALM");
  m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
      "thekeytab");
  FilterConfig config = new DummyFilterConfig(m);
  Properties p = filter.getConfiguration("random", config);
  Assert.assertEquals("xyz/thehost@REALM", p.getProperty("kerberos.principal"));
  Assert.assertEquals("thekeytab", p.getProperty("kerberos.keytab"));
  Assert.assertEquals("true",
      p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}

InternalCallVerifier EqualityVerifier 
/**
 * Explicitly disabling simple-auth anonymous access must be reflected in
 * the filter's configuration ("false").
 *
 * Fix: the config map was declared as a raw HashMap/Map; it is now typed
 * as Map&lt;String, String&gt; to match the string key/value usage.
 */
@Test
public void testGetSimpleAuthDisabledConfiguration() throws ServletException {
  AuthFilter filter = new AuthFilter();
  Map<String, String> m = new HashMap<String, String>();
  m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED, "false");
  FilterConfig config = new DummyFilterConfig(m);
  Properties p = filter.getConfiguration("random", config);
  Assert.assertEquals("false",
      p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
}

Class: org.apache.hadoop.hdfs.web.TestByteRangeInputStream

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Seek/read interaction with the range-request openers: sequential reads
// reuse one connection (connect called once), an out-of-order seek triggers
// a new ranged connect, a seek to the already-buffered next offset does not
// reconnect, and a missing Content-Length header raises IOException. The
// Mockito verify(...) counts depend on the exact call sequence — do not
// reorder statements.
@Test public void testByteRange() throws IOException { ByteRangeInputStream.URLOpener oMock=getMockURLOpener(new URL("http://test")); ByteRangeInputStream.URLOpener rMock=getMockURLOpener(null); ByteRangeInputStream bris=new ByteRangeInputStreamImpl(oMock,rMock); bris.seek(0); assertEquals("getPos wrong",0,bris.getPos()); bris.read(); assertEquals("Initial call made incorrectly (offset check)",0,bris.startPos); assertEquals("getPos should return 1 after reading one byte",1,bris.getPos()); verify(oMock,times(1)).connect(0,false); bris.read(); assertEquals("getPos should return 2 after reading two bytes",2,bris.getPos()); verify(oMock,times(1)).connect(0,false); rMock.setURL(new URL("http://resolvedurl/")); bris.seek(100); bris.read(); assertEquals("Seek to 100 bytes made incorrectly (offset Check)",100,bris.startPos); assertEquals("getPos should return 101 after reading one byte",101,bris.getPos()); verify(rMock,times(1)).connect(100,true); bris.seek(101); bris.read(); verify(rMock,times(1)).connect(100,true); verify(rMock,times(0)).connect(101,true); bris.seek(2500); bris.read(); assertEquals("Seek to 2500 bytes made incorrectly (offset Check)",2500,bris.startPos); doReturn(getMockConnection(null)).when(rMock).connect(anyLong(),anyBoolean()); bris.seek(500); try { bris.read(); fail("Exception should be thrown when content-length is not given"); } catch ( IOException e) { assertTrue("Incorrect response message: " + e.getMessage(),e.getMessage().startsWith(HttpHeaders.CONTENT_LENGTH + " is missing: ")); } bris.close(); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Tracks expected open/close counts in local counters while driving
// getInputStream()/seek()/close(): a seek must force exactly one reopen (and
// one close of the previous stream), repeated getInputStream() must not reopen,
// close() must propagate to the wrapped stream exactly once, and reading a
// closed stream must throw "Stream closed". Counter pre-increments (++x) encode
// the expected deltas at each step.
@Test public void testPropagatedClose() throws IOException { ByteRangeInputStream bris=mock(ByteRangeInputStream.class,CALLS_REAL_METHODS); InputStream mockStream=mock(InputStream.class); doReturn(mockStream).when(bris).openInputStream(); Whitebox.setInternalState(bris,"status",ByteRangeInputStream.StreamStatus.SEEK); int brisOpens=0; int brisCloses=0; int isCloses=0; bris.getInputStream(); verify(bris,times(++brisOpens)).openInputStream(); verify(bris,times(brisCloses)).close(); verify(mockStream,times(isCloses)).close(); bris.getInputStream(); verify(bris,times(brisOpens)).openInputStream(); verify(bris,times(brisCloses)).close(); verify(mockStream,times(isCloses)).close(); bris.seek(1); bris.getInputStream(); verify(bris,times(++brisOpens)).openInputStream(); verify(bris,times(brisCloses)).close(); verify(mockStream,times(++isCloses)).close(); bris.getInputStream(); verify(bris,times(brisOpens)).openInputStream(); verify(bris,times(brisCloses)).close(); verify(mockStream,times(isCloses)).close(); bris.seek(1); bris.getInputStream(); verify(bris,times(brisOpens)).openInputStream(); verify(bris,times(brisCloses)).close(); verify(mockStream,times(isCloses)).close(); bris.close(); verify(bris,times(++brisCloses)).close(); verify(mockStream,times(++isCloses)).close(); bris.close(); verify(bris,times(++brisCloses)).close(); verify(mockStream,times(isCloses)).close(); boolean errored=false; try { bris.getInputStream(); } catch ( IOException e) { errored=true; assertEquals("Stream closed",e.getMessage()); } finally { assertTrue("Read a closed steam",errored); } verify(bris,times(brisOpens)).openInputStream(); verify(bris,times(brisCloses)).close(); verify(mockStream,times(isCloses)).close(); }

Class: org.apache.hadoop.hdfs.web.TestFSMainOperationsWebHdfs

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// concat() of three 1024-byte sources into an existing 1024-byte target must
// remove the sources and leave the target at 4 * 1024 bytes.
@Test public void testConcat() throws Exception { Path[] paths={new Path("/test/hadoop/file1"),new Path("/test/hadoop/file2"),new Path("/test/hadoop/file3")}; DFSTestUtil.createFile(fSys,paths[0],1024,(short)3,0); DFSTestUtil.createFile(fSys,paths[1],1024,(short)3,0); DFSTestUtil.createFile(fSys,paths[2],1024,(short)3,0); Path catPath=new Path("/test/hadoop/catFile"); DFSTestUtil.createFile(fSys,catPath,1024,(short)3,0); Assert.assertTrue(exists(fSys,catPath)); fSys.concat(catPath,paths); Assert.assertFalse(exists(fSys,paths[0])); Assert.assertFalse(exists(fSys,paths[1])); Assert.assertFalse(exists(fSys,paths[2])); FileStatus fileStatus=fSys.getFileStatus(catPath); Assert.assertEquals(1024 * 4,fileStatus.getLen()); }

Class: org.apache.hadoop.hdfs.web.TestHttpsFileSystem

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Smoke test over the "swebhdfs" (HTTPS) scheme: write a single byte, confirm
// the file exists, and read the same byte back.
@Test public void testSWebHdfsFileSystem() throws Exception { FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,"swebhdfs"); final Path f=new Path("/testswebhdfs"); FSDataOutputStream os=fs.create(f); os.write(23); os.close(); Assert.assertTrue(fs.exists(f)); InputStream is=fs.open(f); Assert.assertEquals(23,is.read()); is.close(); fs.close(); }

Class: org.apache.hadoop.hdfs.web.TestJsonUtil

InternalCallVerifier EqualityVerifier 
// An AclStatus built programmatically must serialize to the exact expected
// JSON document (entry order, owner/group, stickyBit included).
@Test public void testToJsonFromAclStatus(){ String jsonString="{\"AclStatus\":{\"entries\":[\"user:user1:rwx\",\"group::rw-\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}"; AclStatus.Builder aclStatusBuilder=new AclStatus.Builder(); aclStatusBuilder.owner("testuser"); aclStatusBuilder.group("supergroup"); aclStatusBuilder.stickyBit(false); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,"user1",ALL),aclEntry(ACCESS,GROUP,READ_WRITE)); aclStatusBuilder.addEntries(aclSpec); Assert.assertEquals(jsonString,JsonUtil.toJsonString(aclStatusBuilder.build())); }

APIUtilityVerifier IterativeVerifier EqualityVerifier 
/**
 * Parses a JSON XAttrs document into a name-to-value map and checks it matches
 * a map built directly from equivalent {@code XAttr} objects.
 *
 * Note: the original extracted line had its generic type arguments stripped
 * ({@code Iterator>} does not compile); they are restored here.
 */
@Test
public void testToXAttrMap() throws IOException {
  String jsonString = "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
  Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
  XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build();
  XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build();
  List<XAttr> xAttrs = Lists.newArrayList();
  xAttrs.add(xAttr1);
  xAttrs.add(xAttr2);
  Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
  Map<String, byte[]> parsedXAttrMap = JsonUtil.toXAttrs(json);
  Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
  // Compare the byte[] values entry by entry: assertEquals on the maps would
  // use array identity rather than content equality.
  Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
  while (iter.hasNext()) {
    Entry<String, byte[]> entry = iter.next();
    Assert.assertArrayEquals(entry.getValue(), parsedXAttrMap.get(entry.getKey()));
  }
}

APIUtilityVerifier EqualityVerifier 
// Round-trips an HdfsFileStatus through JSON (toJsonString -> JSON.parse ->
// toFileStatus) and asserts the derived FileStatus is unchanged.
@Test public void testHdfsFileStatus(){ final long now=Time.now(); final String parent="/dir"; final HdfsFileStatus status=new HdfsFileStatus(1001L,false,3,1L << 26,now,now + 10,new FsPermission((short)0644),"user","group",DFSUtil.string2Bytes("bar"),DFSUtil.string2Bytes("foo"),INodeId.GRANDFATHER_INODE_ID,0,null); final FileStatus fstatus=toFileStatus(status,parent); System.out.println("status = " + status); System.out.println("fstatus = " + fstatus); final String json=JsonUtil.toJsonString(status,true); System.out.println("json = " + json.replace(",",",\n ")); final HdfsFileStatus s2=JsonUtil.toFileStatus((Map)JSON.parse(json),true); final FileStatus fs2=toFileStatus(s2,parent); System.out.println("s2 = " + s2); System.out.println("fs2 = " + fs2); Assert.assertEquals(fstatus,fs2); }

APIUtilityVerifier EqualityVerifier 
// getXAttr must pluck a single named attribute value out of a parsed JSON
// XAttrs document (hex-decoded comparison).
@Test public void testGetXAttrFromJson() throws IOException { String jsonString="{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}"; Map json=(Map)JSON.parse(jsonString); byte[] value=JsonUtil.getXAttr(json,"user.a2"); Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"),value); }

EqualityVerifier 
// Serializing a list of XAttrs with HEX encoding must produce the exact
// expected JSON document.
@Test public void testToJsonFromXAttrs() throws IOException { String jsonString="{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," + "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}"; XAttr xAttr1=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a1").setValue(XAttrCodec.decodeValue("0x313233")).build(); XAttr xAttr2=(new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).setName("a2").setValue(XAttrCodec.decodeValue("0x313131")).build(); List xAttrs=Lists.newArrayList(); xAttrs.add(xAttr1); xAttrs.add(xAttr2); Assert.assertEquals(jsonString,JsonUtil.toJsonString(xAttrs,XAttrCodec.HEX)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a DatanodeInfo through its JSON map form when the legacy
 * combined "name" (host:port) field is present, then verifies malformed or
 * missing names are rejected by checkDecodeFailure.
 *
 * Long literals use an uppercase 'L' suffix: the original lowercase 'l'
 * (e.g. {@code 1338l}) is easily misread as the digit 1.
 */
@Test
public void testToDatanodeInfoWithName() throws Exception {
  Map<String, Object> response = new HashMap<String, Object>();
  // Older clients send the transfer address as a combined "name" field.
  String name = "127.0.0.1:1004";
  response.put("name", name);
  response.put("hostName", "localhost");
  response.put("storageID", "fake-id");
  response.put("infoPort", 1338L);
  response.put("ipcPort", 1339L);
  response.put("capacity", 1024L);
  response.put("dfsUsed", 512L);
  response.put("remaining", 512L);
  response.put("blockPoolUsed", 512L);
  response.put("lastUpdate", 0L);
  response.put("xceiverCount", 4096L);
  response.put("networkLocation", "foo.bar.baz");
  response.put("adminState", "NORMAL");
  response.put("cacheCapacity", 123L);
  response.put("cacheUsed", 321L);
  DatanodeInfo di = JsonUtil.toDatanodeInfo(response);
  Assert.assertEquals(name, di.getXferAddr());
  // Serializing back must split "name" into ipAddr + xferPort.
  Map<String, Object> r = JsonUtil.toJsonMap(di);
  Assert.assertEquals(name, r.get("name"));
  Assert.assertEquals("127.0.0.1", r.get("ipAddr"));
  Assert.assertEquals(1004, (int) (Integer) r.get("xferPort"));
  // Each malformed host:port variant must fail to decode.
  String[] badNames = {"127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123"};
  for (String badName : badNames) {
    response.put("name", badName);
    checkDecodeFailure(response);
  }
  // A missing "name" must fail too, even when ipAddr alone is supplied.
  response.remove("name");
  checkDecodeFailure(response);
  response.put("ipAddr", "127.0.0.1");
  checkDecodeFailure(response);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Parsing a JSON AclStatus document must yield an AclStatus equal to one built
// programmatically from the same entries/owner/group/stickyBit.
@Test public void testToAclStatus(){ String jsonString="{\"AclStatus\":{\"entries\":[\"user::rwx\",\"user:user1:rw-\",\"group::rw-\",\"other::r-x\"],\"group\":\"supergroup\",\"owner\":\"testuser\",\"stickyBit\":false}}"; Map json=(Map)JSON.parse(jsonString); List aclSpec=Lists.newArrayList(aclEntry(ACCESS,USER,ALL),aclEntry(ACCESS,USER,"user1",READ_WRITE),aclEntry(ACCESS,GROUP,READ_WRITE),aclEntry(ACCESS,OTHER,READ_EXECUTE)); AclStatus.Builder aclStatusBuilder=new AclStatus.Builder(); aclStatusBuilder.owner("testuser"); aclStatusBuilder.group("supergroup"); aclStatusBuilder.addEntries(aclSpec); aclStatusBuilder.stickyBit(false); Assert.assertEquals("Should be equal",aclStatusBuilder.build(),JsonUtil.toAclStatus(json)); }

Class: org.apache.hadoop.hdfs.web.TestOffsetUrlInputStream

EqualityVerifier 
/**
 * removeOffsetParam must strip the "offset" query parameter (case-insensitively)
 * wherever it appears in a URL, leaving all other parameters intact.
 */
@Test
public void testRemoveOffset() throws IOException {
  // {input URL, expected URL after removing the offset parameter}
  final String[][] cases = {
    {"http://test/Abc?Length=99", "http://test/Abc?Length=99"},
    {"http://test/Abc", "http://test/Abc"},
    {"http://test/Abc?offset=10&Length=99", "http://test/Abc?Length=99"},
    {"http://test/Abc?op=read&OFFset=10&Length=99", "http://test/Abc?op=read&Length=99"},
    {"http://test/Abc?Length=99&offset=10", "http://test/Abc?Length=99"},
    {"http://test/Abc?offset=10", "http://test/Abc"},
  };
  for (final String[] c : cases) {
    assertEquals(c[1], WebHdfsFileSystem.removeOffsetParam(new URL(c[0])).toString());
  }
}

Class: org.apache.hadoop.hdfs.web.TestTokenAspect

EqualityVerifier 
/**
 * An IOException thrown by getDelegationToken must propagate out of
 * ensureTokenInitialized as the very same instance.
 *
 * Fix: the original test had no {@code fail(...)} after the call, so it would
 * silently pass if no exception was thrown at all.
 */
@Test
public void testGetRemoteTokenFailure() throws IOException, URISyntaxException {
  Configuration conf = new Configuration();
  DummyFs fs = spy(new DummyFs());
  IOException e = new IOException();
  doThrow(e).when(fs).getDelegationToken(anyString());
  fs.emulateSecurityEnabled = true;
  fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
  try {
    fs.tokenAspect.ensureTokenInitialized();
    fail("Expected the stubbed IOException to propagate");
  } catch (IOException exc) {
    // Must be the same exception instance, not a wrapped copy.
    assertEquals(e, exc);
  }
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// When the UGI already holds a matching token, ensureTokenInitialized must use
// it (setDelegationToken) without fetching a new one, and must not start a
// renewer thread or schedule a renew action.
@Test public void testInitWithUGIToken() throws IOException, URISyntaxException { Configuration conf=new Configuration(); DummyFs fs=spy(new DummyFs()); doReturn(null).when(fs).getDelegationToken(anyString()); Token token=new Token(new byte[0],new byte[0],DummyFs.TOKEN_KIND,new Text("127.0.0.1:1234")); fs.ugi.addToken(token); fs.ugi.addToken(new Token(new byte[0],new byte[0],new Text("Other token"),new Text("127.0.0.1:8021"))); assertEquals("wrong tokens in user",2,fs.ugi.getTokens().size()); fs.emulateSecurityEnabled=true; fs.initialize(new URI("dummyfs://127.0.0.1:1234"),conf); fs.tokenAspect.ensureTokenInitialized(); verify(fs).setDelegationToken(token); verify(fs,never()).getDelegationToken(anyString()); assertNull(Whitebox.getInternalState(fs.tokenAspect,"dtRenewer")); assertNull(Whitebox.getInternalState(fs.tokenAspect,"action")); }

Class: org.apache.hadoop.hdfs.web.TestURLConnectionFactory

EqualityVerifier 
// The ConnectionConfigurator passed to URLConnectionFactory must be invoked
// exactly once per openConnection, with the connection targeting the given URL.
// NOTE(review): method name has a typo ("Configuratior"); left unchanged to
// avoid breaking anything that references the test by name.
@Test public void testConnConfiguratior() throws IOException { final URL u=new URL("http://localhost"); final List conns=Lists.newArrayList(); URLConnectionFactory fc=new URLConnectionFactory(new ConnectionConfigurator(){ @Override public HttpURLConnection configure( HttpURLConnection conn) throws IOException { Assert.assertEquals(u,conn.getURL()); conns.add(conn); return conn; } } ); fc.openConnection(u); Assert.assertEquals(1,conns.size()); }

Class: org.apache.hadoop.hdfs.web.TestWebHDFS

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// With DFS_LIST_LIMIT set very low (2), listing a directory holding 3x that
// many files over WebHDFS must still return all entries (exercises server-side
// paging). Runs as a non-superuser with a restrictive umask to cover the
// permission path as well.
@Test(timeout=300000) public void testLargeDirectory() throws Exception { final Configuration conf=WebHdfsTestUtil.createConf(); final int listLimit=2; conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT,listLimit); FsPermission.setUMask(conf,new FsPermission((short)0077)); final MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); try { cluster.waitActive(); WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME).setPermission(new Path("/"),new FsPermission(FsAction.ALL,FsAction.ALL,FsAction.ALL)); UserGroupInformation.setLoginUser(UserGroupInformation.createUserForTesting("not-superuser",new String[]{"not-supergroup"})); UserGroupInformation.createUserForTesting("me",new String[]{"my-group"}).doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws IOException, URISyntaxException { FileSystem fs=WebHdfsTestUtil.getWebHdfsFileSystem(conf,WebHdfsFileSystem.SCHEME); Path d=new Path("/my-dir"); Assert.assertTrue(fs.mkdirs(d)); for (int i=0; i < listLimit * 3; i++) { Path p=new Path(d,"file-" + i); Assert.assertTrue(fs.createNewFile(p)); } Assert.assertEquals(listLimit * 3,fs.listStatus(d).length); return null; } } ); } finally { cluster.shutdown(); } }

Class: org.apache.hadoop.hdfs.web.TestWebHDFSForHA

APIUtilityVerifier EqualityVerifier 
// With a second (fake) HA namespace configured alongside the real one, the
// WebHdfsFileSystem must resolve exactly the two NN addresses of its own
// logical name.
@Test public void testMultipleNamespacesConfigured() throws Exception { Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME); MiniDFSCluster cluster=null; WebHdfsFileSystem fs=null; try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(1).build(); HATestUtil.setFailoverConfigurations(cluster,conf,LOGICAL_NAME); cluster.waitActive(); DFSTestUtil.addHAConfiguration(conf,LOGICAL_NAME + "remote"); DFSTestUtil.setFakeHttpAddresses(conf,LOGICAL_NAME + "remote"); fs=(WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI,conf); Assert.assertEquals(2,fs.getResolvedNNAddr().length); } finally { IOUtils.cleanup(null,fs); if (cluster != null) { cluster.shutdown(); } } }

EqualityVerifier PublicFieldVerifier 
// Opens an output stream against the active NN, then fails over (shut down
// NN1, activate NN0) before writing: the write/close must survive the failover
// and the data must read back intact.
@Test public void testFailoverAfterOpen() throws IOException { Configuration conf=DFSTestUtil.newHAConfiguration(LOGICAL_NAME); conf.set(FS_DEFAULT_NAME_KEY,HdfsConstants.HDFS_URI_SCHEME + "://" + LOGICAL_NAME); MiniDFSCluster cluster=null; FileSystem fs=null; final Path p=new Path("/test"); final byte[] data="Hello".getBytes(); try { cluster=new MiniDFSCluster.Builder(conf).nnTopology(topo).numDataNodes(1).build(); HATestUtil.setFailoverConfigurations(cluster,conf,LOGICAL_NAME); cluster.waitActive(); fs=FileSystem.get(WEBHDFS_URI,conf); cluster.transitionToActive(1); FSDataOutputStream out=fs.create(p); cluster.shutdownNameNode(1); cluster.transitionToActive(0); out.write(data); out.close(); FSDataInputStream in=fs.open(p); byte[] buf=new byte[data.length]; IOUtils.readFully(in,buf,0,buf.length); Assert.assertArrayEquals(data,buf); } finally { IOUtils.cleanup(null,fs); if (cluster != null) { cluster.shutdown(); } } }

Class: org.apache.hadoop.hdfs.web.TestWebHdfsContentLength

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// GET with a redirect: neither the initial request nor the redirected one may
// carry a Content-Length header (both futures must yield null). The IOException
// from the stub server is expected and intentionally ignored.
@Test public void testGetOpWithRedirect(){ Future future1=contentLengthFuture(redirectResponse); Future future2=contentLengthFuture(errResponse); try { fs.open(p).read(); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals(null,getContentLength(future1)); Assert.assertEquals(null,getContentLength(future2)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// PUT (create) with a redirect: the first request sends Content-Length: 0,
// the redirected data-bearing request uses chunked transfer encoding.
@Test public void testPutOpWithRedirect(){ Future future1=contentLengthFuture(redirectResponse); Future future2=contentLengthFuture(errResponse); try { FSDataOutputStream os=fs.create(p); os.write(new byte[]{0}); os.close(); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals("0",getContentLength(future1)); Assert.assertEquals("chunked",getContentLength(future2)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// DELETE must not send a Content-Length header at all (null expected).
@Test public void testDelete(){ Future future=contentLengthFuture(errResponse); try { fs.delete(p,false); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals(null,getContentLength(future)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// A bodyless PUT (mkdirs) must send an explicit Content-Length: 0.
@Test public void testPutOp(){ Future future=contentLengthFuture(errResponse); try { fs.mkdirs(p); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals("0",getContentLength(future)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// A bodyless POST (concat) must send an explicit Content-Length: 0.
@Test public void testPostOp(){ Future future=contentLengthFuture(errResponse); try { fs.concat(p,new Path[]{p}); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals("0",getContentLength(future)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// POST (append) with a redirect: initial request Content-Length: 0, the
// redirected data-bearing request uses chunked transfer encoding.
@Test public void testPostOpWithRedirect(){ Future future1=contentLengthFuture(redirectResponse); Future future2=contentLengthFuture(errResponse); try { FSDataOutputStream os=fs.append(p); os.write(new byte[]{0}); os.close(); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals("0",getContentLength(future1)); Assert.assertEquals("chunked",getContentLength(future2)); }

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
// Plain GET (getFileStatus) must not send a Content-Length header (null).
@Test public void testGetOp() throws Exception { Future future=contentLengthFuture(errResponse); try { fs.getFileStatus(p); Assert.fail(); } catch ( IOException ioe) { } Assert.assertEquals(null,getContentLength(future)); }

Class: org.apache.hadoop.hdfs.web.TestWebHdfsTimeouts

UtilityVerifier EqualityVerifier HybridVerifier 
// The asserted message text ("connect timed out") is produced by the JDK
// socket layer, not by Hadoop code.
/** * Expect connect timeout, because the connection backlog is consumed. */ @Test(timeout=TEST_TIMEOUT) public void testConnectTimeout() throws Exception { consumeConnectionBacklog(); try { fs.listFiles(new Path("/"),false); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("connect timed out",e.getMessage()); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// os is nulled after a successful close so the finally-block cleanup only
// closes it on the failure path.
/** * On the second step of two-step write, expect read timeout accessing the * redirect location, because the bogus server never sends a reply. */ @Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteReadTimeout() throws Exception { startSingleTemporaryRedirectResponseThread(false); OutputStream os=null; try { os=fs.create(new Path("/file")); os.close(); os=null; fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("Read timed out",e.getMessage()); } finally { IOUtils.cleanup(LOG,os); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// "Read timed out" is the JDK's SocketTimeoutException message for a read
// timeout on an established connection.
/** * Expect read timeout, because the bogus server never sends a reply. */ @Test(timeout=TEST_TIMEOUT) public void testReadTimeout() throws Exception { try { fs.listFiles(new Path("/"),false); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("Read timed out",e.getMessage()); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// startSingleTemporaryRedirectResponseThread(false): redirect target accepts
// the connection but never replies, forcing the read timeout.
/** * After a redirect, expect read timeout accessing the redirect location, * because the bogus server never sends a reply. */ @Test(timeout=TEST_TIMEOUT) public void testRedirectReadTimeout() throws Exception { startSingleTemporaryRedirectResponseThread(false); try { fs.getFileChecksum(new Path("/file")); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("Read timed out",e.getMessage()); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// startSingleTemporaryRedirectResponseThread(true): redirect target's backlog
// is pre-consumed, so the follow-up connect itself times out.
/** * After a redirect, expect connect timeout accessing the redirect location, * because the connection backlog is consumed. */ @Test(timeout=TEST_TIMEOUT) public void testRedirectConnectTimeout() throws Exception { startSingleTemporaryRedirectResponseThread(true); try { fs.getFileChecksum(new Path("/file")); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("connect timed out",e.getMessage()); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Unlike the read-timeout variant, fs.create() itself fails here, so os stays
// null on success of the expected path; cleanup handles either case.
/** * On the second step of two-step write, expect connect timeout accessing the * redirect location, because the connection backlog is consumed. */ @Test(timeout=TEST_TIMEOUT) public void testTwoStepWriteConnectTimeout() throws Exception { startSingleTemporaryRedirectResponseThread(true); OutputStream os=null; try { os=fs.create(new Path("/file")); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("connect timed out",e.getMessage()); } finally { IOUtils.cleanup(LOG,os); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// getDelegationToken goes through the authenticated-URL path, which has its
// own timeout configuration — hence a separate test from plain reads.
/** * Expect read timeout on a URL that requires auth, because the bogus server * never sends a reply. */ @Test(timeout=TEST_TIMEOUT) public void testAuthUrlReadTimeout() throws Exception { try { fs.getDelegationToken("renewer"); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("Read timed out",e.getMessage()); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Connect-timeout counterpart of testAuthUrlReadTimeout: the backlog is
// consumed first so the TCP connect itself stalls.
/** * Expect connect timeout on a URL that requires auth, because the connection * backlog is consumed. */ @Test(timeout=TEST_TIMEOUT) public void testAuthUrlConnectTimeout() throws Exception { consumeConnectionBacklog(); try { fs.getDelegationToken("renewer"); fail("expected timeout"); } catch ( SocketTimeoutException e) { assertEquals("connect timed out",e.getMessage()); } }

Class: org.apache.hadoop.hdfs.web.TestWebHdfsTokens

EqualityVerifier 
// Of all GET operations, only GETDELEGATIONTOKEN may require authentication.
@Test(timeout=1000) public void testGetOpRequireAuth(){ for ( HttpOpParam.Op op : GetOpParam.Op.values()) { boolean expect=(op == GetOpParam.Op.GETDELEGATIONTOKEN); assertEquals(expect,op.getRequireAuth()); } }

EqualityVerifier 
// Of all PUT operations, only token renew/cancel may require authentication.
@Test(timeout=1000) public void testPutOpRequireAuth(){ for ( HttpOpParam.Op op : PutOpParam.Op.values()) { boolean expect=(op == PutOpParam.Op.RENEWDELEGATIONTOKEN || op == PutOpParam.Op.CANCELDELEGATIONTOKEN); assertEquals(expect,op.getRequireAuth()); } }

Class: org.apache.hadoop.hdfs.web.TestWebHdfsUrl

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// A path containing a percent-encoded sequence (%2C) must survive URL
// construction unchanged — no double-encoding or decoding.
@Test(timeout=60000) public void testEncodedPathUrl() throws IOException, URISyntaxException { Configuration conf=new Configuration(); final WebHdfsFileSystem webhdfs=(WebHdfsFileSystem)FileSystem.get(uri,conf); String pathName="/hdtest010%2C60020%2C1371000602151.1371058984668"; Path fsPath=new Path(pathName); URL encodedPathUrl=webhdfs.toUrl(PutOpParam.Op.CREATE,fsPath); Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,encodedPathUrl.toURI().getPath()); }

Class: org.apache.hadoop.hdfs.web.TestWebHdfsWithMultipleNameNodes

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// Exercises WebHDFS redirects against multiple NameNodes: for each webhdfs[i],
// write a distinct string, verify length, read it back byte-by-byte, append a
// second distinct string, verify the combined length, then read back and check
// both the written prefix and the appended suffix.
@Test public void testRedirect() throws Exception { final String dir="/testRedirect/"; final String filename="file"; final Path p=new Path(dir,filename); final String[] writeStrings=createStrings("write to webhdfs ","write"); final String[] appendStrings=createStrings("append to webhdfs ","append"); for (int i=0; i < webhdfs.length; i++) { final FSDataOutputStream out=webhdfs[i].create(p); out.write(writeStrings[i].getBytes()); out.close(); } for (int i=0; i < webhdfs.length; i++) { final long expected=writeStrings[i].length(); Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen()); } for (int i=0; i < webhdfs.length; i++) { final FSDataInputStream in=webhdfs[i].open(p); for (int c, j=0; (c=in.read()) != -1; j++) { Assert.assertEquals(writeStrings[i].charAt(j),c); } in.close(); } for (int i=0; i < webhdfs.length; i++) { final FSDataOutputStream out=webhdfs[i].append(p); out.write(appendStrings[i].getBytes()); out.close(); } for (int i=0; i < webhdfs.length; i++) { final long expected=writeStrings[i].length() + appendStrings[i].length(); Assert.assertEquals(expected,webhdfs[i].getFileStatus(p).getLen()); } for (int i=0; i < webhdfs.length; i++) { final StringBuilder b=new StringBuilder(); final FSDataInputStream in=webhdfs[i].open(p); for (int c; (c=in.read()) != -1; ) { b.append((char)c); } final int wlen=writeStrings[i].length(); Assert.assertEquals(writeStrings[i],b.substring(0,wlen)); Assert.assertEquals(appendStrings[i],b.substring(wlen)); in.close(); } }

Class: org.apache.hadoop.hdfs.web.resources.TestParam

APIUtilityVerifier EqualityVerifier 
/**
 * Param.toSortedString must sort parameters by name and percent-escape
 * URI-significant characters in values ('=' -> %3D, '&' -> %26).
 *
 * Fix: the original input literal was "token&ersand", which would escape to
 * "token%26ersand" and can never match the expected "token%26ampersand"; the
 * intended raw value is "token&ampersand".
 */
@Test
public void testToSortedStringEscapesURICharacters() {
  final String sep = "&";
  // One value containing '&', one containing '=', so both escapes are covered.
  Param ampParam = new TokenArgumentParam("token&ampersand");
  Param equalParam = new RenewerParam("renewer=equal");
  // toSortedString emits a leading separator before the first parameter.
  final String expected = "&renewer=renewer%3Dequal&token=token%26ampersand";
  final String actual = Param.toSortedString(sep, equalParam, ampParam);
  Assert.assertEquals(expected, actual);
}

EqualityVerifier 
// The default RenewerParam must carry a null value (parameter omitted).
@Test public void testRenewerParam(){ final RenewerParam p=new RenewerParam(RenewerParam.DEFAULT); Assert.assertEquals(null,p.getValue()); }

EqualityVerifier 
/**
 * An XAttrNameParam must return the name it was constructed with.
 *
 * Fix: JUnit's assertEquals takes (expected, actual); the original had the
 * arguments reversed, which produces a misleading message on failure.
 */
@Test
public void testXAttrNameParam() {
  final XAttrNameParam p = new XAttrNameParam("user.a1");
  Assert.assertEquals("user.a1", p.getXAttrName());
}

UtilityVerifier EqualityVerifier HybridVerifier 
// Valid ACL spec strings (including defaults, masks and named entries) must
// parse; each malformed variant (bad perms, missing type, unknown entry kind)
// must throw IllegalArgumentException.
@Test public void testAclPermissionParam(){ final AclPermissionParam p=new AclPermissionParam("user::rwx,group::r--,other::rwx,user:user1:rwx"); List setAclList=AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",true); Assert.assertEquals(setAclList.toString(),p.getAclPermission(true).toString()); new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx"); try { new AclPermissionParam("user::rw--,group::rwx-,other::rw-"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx,group:group1:rwx,other::rwx,mask::rwx,default:user:user1:rwx"); try { new AclPermissionParam("user:r-,group:rwx,other:rw-"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new AclPermissionParam("default:::r-,default:group::rwx,other::rw-"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Default modification time is -1 (meaning "unset"); -1 is explicitly allowed
// while anything below it is rejected.
@Test public void testModificationTimeParam(){ final ModificationTimeParam p=new ModificationTimeParam(ModificationTimeParam.DEFAULT); Assert.assertEquals(-1L,p.getValue().longValue()); new ModificationTimeParam(-1L); try { new ModificationTimeParam(-2L); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Default replication param is null; getValue(conf) falls back to the
// configured dfs.replication. Zero replication is rejected.
@Test public void testReplicationParam(){ final ReplicationParam p=new ReplicationParam(ReplicationParam.DEFAULT); Assert.assertEquals(null,p.getValue()); Assert.assertEquals((short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,DFSConfigKeys.DFS_REPLICATION_DEFAULT),p.getValue(conf)); new ReplicationParam((short)1); try { new ReplicationParam((short)0); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

APIUtilityVerifier IterativeVerifier EqualityVerifier 
// For each prefix length n of the sample paths, the param's value must be the
// comma-joined path strings (including the empty case n=0).
@Test public void testConcatSourcesParam(){ final String[] strings={"/","/foo","/bar"}; for (int n=0; n < strings.length; n++) { final String[] sub=new String[n]; final Path[] paths=new Path[n]; for (int i=0; i < paths.length; i++) { paths[i]=new Path(sub[i]=strings[i]); } final String expected=StringUtils.join(",",Arrays.asList(sub)); final ConcatSourcesParam computed=new ConcatSourcesParam(paths); Assert.assertEquals(expected,computed.getValue()); } }

EqualityVerifier 
// The default GroupParam must carry a null value (parameter omitted).
@Test public void testGroupParam(){ final GroupParam p=new GroupParam(GroupParam.DEFAULT); Assert.assertEquals(null,p.getValue()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Default buffer size param is null; getValue(conf) falls back to
// io.file.buffer.size. A zero buffer size is rejected.
@Test public void testBufferSizeParam(){ final BufferSizeParam p=new BufferSizeParam(BufferSizeParam.DEFAULT); Assert.assertEquals(null,p.getValue()); Assert.assertEquals(conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),p.getValue(conf)); new BufferSizeParam(1); try { new BufferSizeParam(0); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Defaults to false; boolean text is parsed case-insensitively ("falSe"),
// and non-boolean text is rejected.
@Test public void testRecursiveParam(){ final RecursiveParam p=new RecursiveParam(RecursiveParam.DEFAULT); Assert.assertEquals(false,p.getValue()); new RecursiveParam("falSe"); try { new RecursiveParam("abc"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Defaults to false; boolean text is parsed case-insensitively ("trUe"),
// and non-boolean text is rejected.
@Test public void testOverwriteParam(){ final OverwriteParam p=new OverwriteParam(OverwriteParam.DEFAULT); Assert.assertEquals(false,p.getValue()); new OverwriteParam("trUe"); try { new OverwriteParam("abc"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

InternalCallVerifier EqualityVerifier 
/**
 * A RenameOptionSetParam rebuilt from its own string form must yield the same
 * EnumSet of rename options.
 *
 * Fix: JUnit's assertEquals takes (expected, actual); the original had the
 * arguments reversed.
 */
@Test
public void testRenameOptionSetParam() {
  final RenameOptionSetParam p = new RenameOptionSetParam(Options.Rename.OVERWRITE, Options.Rename.NONE);
  final RenameOptionSetParam p1 = new RenameOptionSetParam(p.getValueString());
  Assert.assertEquals(EnumSet.of(Options.Rename.OVERWRITE, Options.Rename.NONE), p1.getValue());
}

InternalCallVerifier EqualityVerifier 
/**
 * An XAttrEncodingParam must report the codec it was built with, both when
 * constructed directly and when round-tripped through its string form.
 *
 * Fix: JUnit's assertEquals takes (expected, actual); the original had the
 * arguments reversed in both assertions.
 */
@Test
public void testXAttrEncodingParam() {
  final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
  Assert.assertEquals(XAttrCodec.BASE64, p.getEncoding());
  final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
  Assert.assertEquals(XAttrCodec.BASE64, p1.getEncoding());
}

EqualityVerifier 
// Both the old and new snapshot-name params must carry their constructor
// arguments through getValue().
@Test public void testSnapshotNameParam(){ final OldSnapshotNameParam s1=new OldSnapshotNameParam("s1"); final SnapshotNameParam s2=new SnapshotNameParam("s2"); Assert.assertEquals("s1",s1.getValue()); Assert.assertEquals("s2",s2.getValue()); }

UtilityVerifier EqualityVerifier HybridVerifier 
// Default is null; an absolute path is accepted while a relative one is
// rejected with IllegalArgumentException.
@Test public void testDestinationParam(){ final DestinationParam p=new DestinationParam(DestinationParam.DEFAULT); Assert.assertEquals(null,p.getValue()); new DestinationParam("/abc"); try { new DestinationParam("abc"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

UtilityVerifier EqualityVerifier HybridVerifier 
// Default permission is 0755. Octal strings within [0, 1777] are accepted;
// negatives, values above 1777, non-octal digits ("8") and non-numeric text
// are all rejected.
@Test public void testPermissionParam(){ final PermissionParam p=new PermissionParam(PermissionParam.DEFAULT); Assert.assertEquals(new FsPermission((short)0755),p.getFsPermission()); new PermissionParam("0"); try { new PermissionParam("-1"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } new PermissionParam("1777"); try { new PermissionParam("2000"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new PermissionParam("8"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } try { new PermissionParam("abc"); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

EqualityVerifier 
/**
 * An XAttrValueParam built from a hex string must decode to the same bytes as
 * XAttrCodec.decodeValue.
 *
 * Fix: assertArrayEquals takes (expecteds, actuals); the original had the
 * arguments reversed.
 */
@Test
public void testXAttrValueParam() throws IOException {
  final XAttrValueParam p = new XAttrValueParam("0x313233");
  Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313233"), p.getXAttrValue());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Default block size param is null; getValue(conf) falls back to the
// configured dfs.blocksize. A zero block size is rejected.
@Test public void testBlockSizeParam(){ final BlockSizeParam p=new BlockSizeParam(BlockSizeParam.DEFAULT); Assert.assertEquals(null,p.getValue()); Assert.assertEquals(conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),p.getValue(conf)); new BlockSizeParam(1L); try { new BlockSizeParam(0L); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

InternalCallVerifier EqualityVerifier 
/**
 * An XAttrSetFlagParam must report the flag set it was built with, both when
 * constructed directly and when round-tripped through its string form.
 *
 * Fix: JUnit's assertEquals takes (expected, actual); the original had the
 * arguments reversed in both assertions.
 */
@Test
public void testXAttrSetFlagParam() {
  EnumSet flag = EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
  final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
  Assert.assertEquals(flag, p.getFlag());
  final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
  Assert.assertEquals(flag, p1.getFlag());
}

UtilityVerifier EqualityVerifier HybridVerifier 
// Default access time is -1 ("unset"); -1 is explicitly allowed while any
// smaller value is rejected.
@Test public void testAccessTimeParam(){ final AccessTimeParam p=new AccessTimeParam(AccessTimeParam.DEFAULT); Assert.assertEquals(-1L,p.getValue().longValue()); new AccessTimeParam(-1L); try { new AccessTimeParam(-2L); Assert.fail(); } catch ( IllegalArgumentException e) { LOG.info("EXPECTED: " + e); } }

EqualityVerifier 
/** The default delegation-token parameter carries no token value. */
@Test
public void testDelegationParam() {
  final DelegationParam param = new DelegationParam(DelegationParam.DEFAULT);
  Assert.assertEquals(null, param.getValue());
}

EqualityVerifier 
/** The default owner parameter carries no owner value. */
@Test
public void testOwnerParam() {
  final OwnerParam param = new OwnerParam(OwnerParam.DEFAULT);
  Assert.assertEquals(null, param.getValue());
}

Class: org.apache.hadoop.http.TestHtmlQuoting

InternalCallVerifier EqualityVerifier 
@Test public void testRequestQuoting() throws Exception { HttpServletRequest mockReq=Mockito.mock(HttpServletRequest.class); HttpServer2.QuotingInputFilter.RequestQuoter quoter=new HttpServer2.QuotingInputFilter.RequestQuoter(mockReq); Mockito.doReturn("a

EqualityVerifier 
@Test public void testQuoting() throws Exception { assertEquals("ab<cd",HtmlQuoting.quoteHtmlChars("ab")); assertEquals("&&&",HtmlQuoting.quoteHtmlChars("&&&")); assertEquals(" '\n",HtmlQuoting.quoteHtmlChars(" '\n")); assertEquals(""",HtmlQuoting.quoteHtmlChars("\"")); assertEquals(null,HtmlQuoting.quoteHtmlChars(null)); }

Class: org.apache.hadoop.http.TestHttpRequestLog

NullVerifier EqualityVerifier HybridVerifier 
/** HttpRequestLog wires up an NCSARequestLog when a matching log appender exists. */
@Test
public void testAppenderDefined() {
  HttpRequestLogAppender appender = new HttpRequestLogAppender();
  appender.setName("testrequestlog");
  Logger.getLogger("http.requests.test").addAppender(appender);
  RequestLog requestLog = HttpRequestLog.getRequestLog("test");
  Logger.getLogger("http.requests.test").removeAppender(appender);
  assertNotNull("RequestLog should not be null", requestLog);
  assertEquals("Class mismatch", NCSARequestLog.class, requestLog.getClass());
}

Class: org.apache.hadoop.http.TestHttpRequestLogAppender

InternalCallVerifier EqualityVerifier 
/** Setters on HttpRequestLogAppender are reflected by the matching getters. */
@Test
public void testParameterPropagation() {
  HttpRequestLogAppender appender = new HttpRequestLogAppender();
  appender.setFilename("jetty-namenode-yyyy_mm_dd.log");
  appender.setRetainDays(17);
  assertEquals("Filename mismatch",
      "jetty-namenode-yyyy_mm_dd.log", appender.getFilename());
  assertEquals("Retain days mismatch", 17, appender.getRetainDays());
}

Class: org.apache.hadoop.http.TestHttpServer

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verify administrator access for the /logs, /stacks, /conf, /logLevel and
 * /metrics servlets: users named in the ACL (directly or via group) get
 * HTTP 200, anyone else gets 403.
 */
@Test
public void testAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, DummyFilterInitializer.class.getName());
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);

  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
  MyGroupsProvider.mapping.put("userC", Arrays.asList("groupC"));
  MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD"));
  MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));

  // ACL admits userA and userB directly, plus members of groupC and groupD.
  HttpServer2 myServer = new HttpServer2.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
      .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();

  String serverURL =
      "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  for (String servlet : new String[] {"conf", "logs", "stacks", "logLevel", "metrics"}) {
    for (String user : new String[] {"userA", "userB", "userC", "userD"}) {
      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user));
    }
    // userE is in neither the user list nor an admitted group.
    assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
        getHttpStatusCode(serverURL + servlet, "userE"));
  }
  myServer.stop();
}

EqualityVerifier 
/**
 * The echo servlet reflects query parameters back one per line, HTML-quoting
 * special characters. Restored "&amp;lt;"/"&amp;gt;" in the expected output
 * string; the extracted text had the entities decoded, which corrupted the
 * literal (the raw request URL on the other hand legitimately contains the
 * unescaped characters).
 */
@Test
public void testEcho() throws Exception {
  assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d")));
  assertEquals("a:b\nc&lt;:d\ne:&gt;\n",
      readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
}

APIUtilityVerifier EqualityVerifier 
/**
 * Responses carry the right Content-Type: static .css is text/css, the echo
 * servlet is plain text (even for a ".css"-looking query value), and
 * /htmlcontent is HTML. The repeated open/connect/assert sequence is
 * factored into a helper.
 */
@Test
public void testContentTypes() throws Exception {
  assertContentType(new URL(baseUrl, "/static/test.css"), "text/css");
  assertContentType(new URL(baseUrl, "/echo?a=b"), "text/plain; charset=utf-8");
  assertContentType(new URL(baseUrl, "/echo?a=b.css"), "text/plain; charset=utf-8");
  assertContentType(new URL(baseUrl, "/htmlcontent"), "text/html; charset=utf-8");
}

/** Fetches {@code url} and asserts a 200 status with the given Content-Type. */
private static void assertContentType(URL url, String expectedType) throws IOException {
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.connect();
  assertEquals(200, conn.getResponseCode());
  assertEquals(expectedType, conn.getContentType());
}

InternalCallVerifier EqualityVerifier 
/** RequestQuoter passes through a null parameter-value array unchanged. */
@Test
public void testRequestQuoterWithNull() throws Exception {
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  Mockito.doReturn(null).when(request).getParameterValues("dummy");
  RequestQuoter quoter = new RequestQuoter(request);
  String[] values = quoter.getParameterValues("dummy");
  Assert.assertEquals("It should return null " + "when there are no values for the parameter",
      null, values);
}

APIUtilityVerifier EqualityVerifier 
/**
 * Headers up to 64K must be accepted: send a 63K header value (leaving ~1K
 * for the remaining headers, since the buffer limit covers names and values
 * of ALL headers) and expect HTTP 200.
 */
@Test
public void testLongHeader() throws Exception {
  URL url = new URL(baseUrl, "/longheader");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  StringBuilder headerValue = new StringBuilder();
  for (int i = 0; i < 63 * 1024; i++) {
    headerValue.append("a");
  }
  conn.setRequestProperty("longheader", headerValue.toString());
  assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Port binding behavior: a stopped server's connector address reads back
 * null, and reopening its listeners restores the same port it had before.
 */
@Test
public void testBindAddress() throws Exception {
  checkBindAddress("localhost", 0, false).stop();
  // Keep this one running so its port is occupied.
  HttpServer2 myServer = checkBindAddress("localhost", 0, false);
  HttpServer2 myServer2 = null;
  try {
    int port = myServer.getConnectorAddress(0).getPort();
    // The port is taken; true = a find-port retry is expected.
    myServer2 = checkBindAddress("localhost", port, true);
    port = myServer2.getConnectorAddress(0).getPort();
    myServer2.stop();
    assertNull(myServer2.getConnectorAddress(0)); // stopped => no address
    myServer2.openListeners();
    // Reopening must land on the same port as before the stop.
    assertEquals(port, myServer2.getConnectorAddress(0).getPort());
  } finally {
    myServer.stop();
    if (myServer2 != null) {
      myServer2.stop();
    }
  }
}

BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
// Floods the server with MAX_THREADS*10 concurrent /echo requests (gated by a
// shared start latch so they fire together) and checks, from inside each client
// task, that the server never has more than MAX_THREADS worker threads.
// NOTE(review): the empty catch swallows client-side exceptions, and assertion
// failures raised inside the executor threads are never propagated back to the
// JUnit thread - a violation here would go unreported. Confirm whether this
// best-effort checking is intentional before tightening it.
/** * Test the maximum number of threads cannot be exceeded. */ @Test public void testMaxThreads() throws Exception { int clientThreads=MAX_THREADS * 10; Executor executor=Executors.newFixedThreadPool(clientThreads); final CountDownLatch ready=new CountDownLatch(clientThreads); final CountDownLatch start=new CountDownLatch(1); for (int i=0; i < clientThreads; i++) { executor.execute(new Runnable(){ @Override public void run(){ ready.countDown(); try { start.await(); assertEquals("a:b\nc:d\n",readOutput(new URL(baseUrl,"/echo?a=b&c=d"))); int serverThreads=server.webServer.getThreadPool().getThreads(); assertTrue("More threads are started than expected, Server Threads count: " + serverThreads,serverThreads <= MAX_THREADS); System.out.println("Number of threads = " + serverThreads + " which is less or equal than the max = "+ MAX_THREADS); } catch ( Exception e) { } } } ); } ready.await(); start.countDown(); }

EqualityVerifier 
/**
 * The echomap servlet (getParameterMap-based) groups repeated parameters
 * onto one comma-separated line and HTML-quotes special characters.
 * Restored "&amp;gt;"/"&amp;lt;" in the expected output string; the
 * extracted text had the entities decoded, corrupting the literal.
 */
@Test
public void testEchoMap() throws Exception {
  assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c=d")));
  assertEquals("a:b,&gt;\nc&lt;:d\n",
      readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
}

InternalCallVerifier EqualityVerifier 
/**
 * With an authentication filter configured but authorization disabled, the
 * default servlets (/conf, /logs, /stacks, /logLevel, /metrics) are reachable
 * (HTTP 200) by any authenticated user.
 */
@Test
public void testDisabledAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, DummyFilterInitializer.class.getName());
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));

  HttpServer2 myServer = new HttpServer2.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();

  String serverURL =
      "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  for (String servlet : new String[] {"conf", "logs", "stacks", "logLevel", "metrics"}) {
    for (String user : new String[] {"userA", "userB"}) {
      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user));
    }
  }
  myServer.stop();
}

APIUtilityVerifier EqualityVerifier 
/** The Jersey resource echoes its path element and op query parameter as JSON. */
@Test
public void testJersey() throws Exception {
  LOG.info("BEGIN testJersey()");
  final String json = readOutput(new URL(baseUrl, "/jersey/foo?op=bar"));
  final Map parsed = parse(json);
  LOG.info("m=" + parsed);
  assertEquals("foo", parsed.get(JerseyResource.PATH));
  assertEquals("bar", parsed.get(JerseyResource.OP));
  LOG.info("END testJersey()");
}

APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Every response carries no-cache headers, and Expires matches Date. */
@Test
public void testNoCacheHeader() throws Exception {
  URL url = new URL(baseUrl, "/echo?a=b&c=d");
  HttpURLConnection connection = (HttpURLConnection) url.openConnection();
  assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
  assertEquals("no-cache", connection.getHeaderField("Cache-Control"));
  assertEquals("no-cache", connection.getHeaderField("Pragma"));
  assertNotNull(connection.getHeaderField("Expires"));
  assertNotNull(connection.getHeaderField("Date"));
  // An Expires equal to Date means "already expired" - i.e. never cached.
  assertEquals(connection.getHeaderField("Expires"), connection.getHeaderField("Date"));
}

Class: org.apache.hadoop.http.TestHttpServerLifecycle

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A started server serves attributes set on it; stopping the server clears
 * its webapp context, so the attribute reads back null afterwards.
 * @throws Throwable on failure
 */
@Test
public void testWepAppContextAfterServerStop() throws Throwable {
  final String key = "test.attribute.key";
  final String value = "test.attribute.value";
  HttpServer2 server = createTestServer();
  assertNotLive(server);
  server.start();
  server.setAttribute(key, value);
  assertAlive(server);
  assertEquals(value, server.getAttribute(key));
  stop(server);
  assertNull("Server context should have cleared", server.getAttribute(key));
}

Class: org.apache.hadoop.http.TestSSLHttpServer

EqualityVerifier 
/**
 * The echo servlet over SSL reflects query parameters back one per line,
 * HTML-quoting special characters. Restored "&amp;lt;"/"&amp;gt;" in the
 * expected output string; the extracted text had the entities decoded.
 */
@Test
public void testEcho() throws Exception {
  assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d")));
  assertEquals("a:b\nc&lt;:d\ne:&gt;\n",
      readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>")));
}

Class: org.apache.hadoop.http.TestServletFilter

IterativeVerifier BranchVerifier EqualityVerifier 
/**
 * Hits 50 randomly chosen urls and checks what the configured SimpleFilter
 * recorded: for index 0 (/fsck) the recorded uri stays null - presumably
 * that path is outside the filter's mapping (TODO confirm against
 * SimpleFilter.Initializer) - while every other url must be recorded.
 */
@Test
public void testServletFilter() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, SimpleFilter.Initializer.class.getName());
  HttpServer2 http = createTestServer(conf);
  http.start();

  final String fsckURL = "/fsck";
  final String stacksURL = "/stacks";
  final String ajspURL = "/a.jsp";
  final String logURL = "/logs/a.log";
  final String hadooplogoURL = "/static/hadoop-logo.jpg";
  final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL};

  // A random access sequence of 50 picks over the urls above.
  final Random ran = new Random();
  final int[] sequence = new int[50];
  for (int pos = 0; pos < sequence.length; pos++) {
    sequence[pos] = ran.nextInt(urls.length);
  }

  final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0));
  try {
    for (int pos = 0; pos < sequence.length; pos++) {
      access(prefix + urls[sequence[pos]]);
      if (sequence[pos] == 0) {
        assertEquals(null, uri);
      } else {
        assertEquals(urls[sequence[pos]], uri);
        uri = null; // reset for the next access
      }
    }
  } finally {
    http.stop();
  }
}

Class: org.apache.hadoop.http.lib.TestStaticUserWebFilter

InternalCallVerifier EqualityVerifier 
/**
 * StaticUserFilter wraps the request so the downstream chain sees the
 * configured static user as both the principal and the remote user.
 * Restored the ArgumentCaptor type parameter: the extracted text had the
 * generics stripped, and with a raw captor {@code getValue()} returns Object,
 * which cannot be assigned to HttpServletRequestWrapper without a cast.
 */
@Test
public void testFilter() throws Exception {
  FilterConfig config = mockConfig("myuser");
  StaticUserFilter suf = new StaticUserFilter();
  suf.init(config);
  ArgumentCaptor<HttpServletRequestWrapper> wrapperArg =
      ArgumentCaptor.forClass(HttpServletRequestWrapper.class);
  FilterChain chain = mock(FilterChain.class);
  suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), chain);
  Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito.anyObject());
  HttpServletRequestWrapper wrapper = wrapperArg.getValue();
  assertEquals("myuser", wrapper.getUserPrincipal().getName());
  assertEquals("myuser", wrapper.getRemoteUser());
  suf.destroy();
}

InternalCallVerifier EqualityVerifier 
/** getUsernameFromConf reads the static-user key from the configuration. */
@Test
public void testConfiguration() {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER, "joe");
  assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
}

InternalCallVerifier EqualityVerifier 
/** The legacy dfs.web.ugi "user,group,..." form still yields the user name. */
@Test
public void testOldStyleConfiguration() {
  Configuration conf = new Configuration();
  conf.set("dfs.web.ugi", "joe,group1,group2");
  assertEquals("joe", StaticUserWebFilter.getUsernameFromConf(conf));
}

Class: org.apache.hadoop.io.TestArrayPrimitiveWritable

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * An int[] written by ObjectWritable in the old, non-compact format is
 * labelled with the array class name, carries an explicit length, and each
 * element round-trips as a boxed Integer.
 * Renamed the inner loop variable: the original used {@code int i}, which
 * shadowed the {@code int[] i} field under test inside the loop body.
 */
@Test
public void testOldFormat() throws IOException {
  ObjectWritable.writeObject(out, i, i.getClass(), null);
  in.reset(out.getData(), out.getLength());
  @SuppressWarnings("deprecation")
  String className = UTF8.readString(in);
  assertEquals("The int[] written by ObjectWritable as a non-compact array "
      + "was not labelled as an array of int", i.getClass().getName(), className);
  int length = in.readInt();
  assertEquals("The int[] written by ObjectWritable as a non-compact array "
      + "was not expected length", i.length, length);
  int[] readValue = new int[length];
  try {
    for (int idx = 0; idx < length; idx++) {
      readValue[idx] = (int) ((Integer) ObjectWritable.readObject(in, null));
    }
  } catch (Exception e) {
    fail("The int[] written by ObjectWritable as a non-compact array "
        + "was corrupted. Failed to correctly read int[] of length " + length
        + ". Got exception:\n" + StringUtils.stringifyException(e));
  }
  assertTrue("The int[] written by ObjectWritable as a non-compact array "
      + "was corrupted.", Arrays.equals(i, readValue));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Round-trips an int[] through ObjectWritable with allowCompactArrays=true and
// checks that (1) it is serialized as ArrayPrimitiveWritable.Internal, (2) an
// explicitly wrapped ArrayPrimitiveWritable is labelled with both declaredClass
// and class, and (3) the values survive both paths.
// NOTE(review): after apwi.readFields(in), the first component-type assertion
// checks apw.getComponentType() (the object that was WRITTEN) rather than
// apwi.getComponentType() (the object just READ BACK). It looks like it was
// meant to verify the deserialized object - confirm before changing.
@Test @SuppressWarnings("deprecation") public void testObjectLabeling() throws IOException { ObjectWritable.writeObject(out,i,i.getClass(),null,true); ArrayPrimitiveWritable apw=new ArrayPrimitiveWritable(i); ObjectWritable.writeObject(out,apw,apw.getClass(),null,true); in.reset(out.getData(),out.getLength()); String className=UTF8.readString(in); assertEquals("The int[] written by ObjectWritable was not labelled as " + "an ArrayPrimitiveWritable.Internal",ArrayPrimitiveWritable.Internal.class.getName(),className); ArrayPrimitiveWritable.Internal apwi=new ArrayPrimitiveWritable.Internal(); apwi.readFields(in); assertEquals("The ArrayPrimitiveWritable.Internal component type was corrupted",int.class,apw.getComponentType()); assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable.Internal was corrupted",Arrays.equals(i,(int[])(apwi.get()))); String declaredClassName=UTF8.readString(in); assertEquals("The APW written by ObjectWritable was not labelled as " + "declaredClass ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),declaredClassName); className=UTF8.readString(in); assertEquals("The APW written by ObjectWritable was not labelled as " + "class ArrayPrimitiveWritable",ArrayPrimitiveWritable.class.getName(),className); ArrayPrimitiveWritable apw2=new ArrayPrimitiveWritable(); apw2.readFields(in); assertEquals("The ArrayPrimitiveWritable component type was corrupted",int.class,apw2.getComponentType()); assertTrue("The int[] written by ObjectWritable as " + "ArrayPrimitiveWritable was corrupted",Arrays.equals(i,(int[])(apw2.get()))); }

IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Writes every array in bigSet twice - once via ObjectWritable with compact
 * arrays enabled, once via ArrayPrimitiveWritable - then reads them all back
 * in order and checks component types and values against expectedResultSet.
 */
@Test
public void testMany() throws IOException {
  for (Object x : bigSet) {
    ObjectWritable.writeObject(out, x, x.getClass(), null, true);
    (new ArrayPrimitiveWritable(x)).write(out);
  }
  in.reset(out.getData(), out.getLength());
  // Read back interleaved: one ObjectWritable entry, one APW entry.
  for (int slot = 0; slot < resultSet.length; ) {
    resultSet[slot++] = ObjectWritable.readObject(in, null);
    ArrayPrimitiveWritable apw = new ArrayPrimitiveWritable();
    apw.readFields(in);
    resultSet[slot++] = apw.get();
  }
  assertEquals(expectedResultSet.length, resultSet.length);
  for (int slot = 0; slot < resultSet.length; slot++) {
    assertEquals("ComponentType of array " + slot,
        expectedResultSet[slot].getClass().getComponentType(),
        resultSet[slot].getClass().getComponentType());
  }
  assertTrue("In and Out arrays didn't match values",
      Arrays.deepEquals(expectedResultSet, resultSet));
}

Class: org.apache.hadoop.io.TestBooleanWritable

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * BooleanWritable contracts: equals/hashCode agree, compareTo orders
 * true above false, and toString renders the value.
 */
@Test
public void testCommonMethods() {
  assertTrue("testCommonMethods1 error !!!", newInstance(true).equals(newInstance(true)));
  assertTrue("testCommonMethods2 error !!!", newInstance(false).equals(newInstance(false)));
  assertFalse("testCommonMethods3 error !!!", newInstance(false).equals(newInstance(true)));
  assertTrue("testCommonMethods4 error !!!",
      checkHashCode(newInstance(true), newInstance(true)));
  assertFalse("testCommonMethods5 error !!! ",
      checkHashCode(newInstance(true), newInstance(false)));
  assertTrue("testCommonMethods6 error !!!",
      newInstance(true).compareTo(newInstance(false)) > 0);
  assertTrue("testCommonMethods7 error !!!",
      newInstance(false).compareTo(newInstance(true)) < 0);
  assertTrue("testCommonMethods8 error !!!",
      newInstance(false).compareTo(newInstance(false)) == 0);
  assertEquals("testCommonMethods9 error !!!", "true", newInstance(true).toString());
}

EqualityVerifier 
/** Serialized-form comparison: equal values compare as 0, true sorts above false. */
@Test
public void testCompareUnequalWritables() throws Exception {
  DataOutputBuffer bTrue = writeWritable(new BooleanWritable(true));
  DataOutputBuffer bFalse = writeWritable(new BooleanWritable(false));
  WritableComparator comparator = WritableComparator.get(BooleanWritable.class);
  assertEquals(0, compare(comparator, bTrue, bTrue));
  assertEquals(0, compare(comparator, bFalse, bFalse));
  assertEquals(1, compare(comparator, bTrue, bFalse));
  assertEquals(-1, compare(comparator, bFalse, bTrue));
}

Class: org.apache.hadoop.io.TestBytesWritable

InternalCallVerifier EqualityVerifier 
/** BytesWritable hashing depends only on the logical contents, not capacity. */
@Test
public void testHash() throws Exception {
  byte[] owen = "owen".getBytes();
  BytesWritable buf = new BytesWritable(owen);
  assertEquals(4347922, buf.hashCode());
  // Growing the capacity must not change the hash...
  buf.setCapacity(10000);
  assertEquals(4347922, buf.hashCode());
  // ...but truncating the logical size does.
  buf.setSize(0);
  assertEquals(1, buf.hashCode());
}

InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Zero-copy constructor/set for BytesWritable: the caller's backing array is
 * used directly (no copy), and instances sharing contents are equal, hash the
 * same, print the same and compare as 0 - regardless of which constructor
 * built them.
 */
@Test
public void testZeroCopy() {
  byte[] bytes = "brock".getBytes();
  BytesWritable zeroBuf = new BytesWritable(bytes, bytes.length); // zero-copy path
  BytesWritable copyBuf = new BytesWritable(bytes);               // copying path
  assertTrue("copy took place, backing array != array passed to constructor",
      bytes == zeroBuf.getBytes());
  assertTrue("length of BW should backing byte array", zeroBuf.getLength() == bytes.length);
  assertEquals("objects with same backing array should be equal", zeroBuf, copyBuf);
  assertEquals("string repr of objects with same backing array should be equal",
      zeroBuf.toString(), copyBuf.toString());
  assertTrue("compare order objects with same backing array should be equal",
      zeroBuf.compareTo(copyBuf) == 0);
  assertTrue("hash of objects with same backing array should be equal",
      zeroBuf.hashCode() == copyBuf.hashCode());
  // Re-point at a larger buffer, then back at the original contents; the
  // equality with the copy-constructed instance must be restored.
  byte[] buffer = new byte[bytes.length * 5];
  zeroBuf.set(buffer, 0, buffer.length);
  zeroBuf.set(bytes, 0, bytes.length);
  assertEquals("buffer created with (array, len) has bad contents", zeroBuf, copyBuf);
  assertTrue("buffer created with (array, len) has bad length",
      zeroBuf.getLength() == copyBuf.getLength());
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Grows and shrinks a BytesWritable, checking capacity/length bookkeeping:
// doubling the size preserves existing bytes and forces a larger capacity,
// copyBytes() honors the logical size, and setCapacity(1) truncates length.
// NOTE(review): the System.arraycopy duplicates the first `size` bytes into
// the second half of the backing array immediately after setSize(size*2); the
// later per-byte loop depends on that copy surviving the subsequent
// setSize(size*4) reallocation - statement order here is significant.
@Test public void testSizeChange() throws Exception { byte[] hadoop="hadoop".getBytes(); BytesWritable buf=new BytesWritable(hadoop); int size=buf.getLength(); int orig_capacity=buf.getCapacity(); buf.setSize(size * 2); int new_capacity=buf.getCapacity(); System.arraycopy(buf.getBytes(),0,buf.getBytes(),size,size); assertTrue(new_capacity >= size * 2); assertEquals(size * 2,buf.getLength()); assertTrue(new_capacity != orig_capacity); buf.setSize(size * 4); assertTrue(new_capacity != buf.getCapacity()); for (int i=0; i < size * 2; ++i) { assertEquals(hadoop[i % size],buf.getBytes()[i]); } assertEquals(size * 4,buf.copyBytes().length); buf.setCapacity(1); assertEquals(1,buf.getLength()); assertEquals(hadoop[0],buf.getBytes()[0]); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** ByteWritable: set/get round-trip, compareTo ordering, equals across types, toString. */
@Test
public void testObjectCommonMethods() {
  byte b = 0x9;
  ByteWritable bw = new ByteWritable();
  bw.set(b);
  assertTrue("testSetByteWritable error", bw.get() == b);
  assertTrue("testSetByteWritable error < 0", bw.compareTo(new ByteWritable((byte) 0xA)) < 0);
  assertTrue("testSetByteWritable error > 0", bw.compareTo(new ByteWritable((byte) 0x8)) > 0);
  assertTrue("testSetByteWritable error == 0", bw.compareTo(new ByteWritable((byte) 0x9)) == 0);
  assertTrue("testSetByteWritable equals error !!!", bw.equals(new ByteWritable((byte) 0x9)));
  assertTrue("testSetByteWritable equals error !!!", !bw.equals(new ByteWritable((byte) 0xA)));
  // A different Writable type is never equal, even with the "same" value.
  assertTrue("testSetByteWritable equals error !!!", !bw.equals(new IntWritable(1)));
  assertEquals("testSetByteWritable error ", "9", bw.toString());
}

Class: org.apache.hadoop.io.TestDataByteBuffers

InternalCallVerifier EqualityVerifier 
/**
 * DataOutputByteBuffer must produce byte-for-byte the same stream as
 * DataOutputBuffer for identical writes: three rounds of random junk
 * (1000, 3000, 1000 records) with the same RNG seed into both sinks,
 * resetting the buffers between rounds. The repeated
 * write/extract/compare sequence is factored into a helper.
 */
@Test
public void testDataOutputByteBufferCompatibility() throws IOException {
  DataOutputBuffer dob = new DataOutputBuffer();
  DataOutputByteBuffer dobb = new DataOutputByteBuffer();
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  System.out.println("SEED: " + seed);
  checkIdenticalOutput(dob, dobb, r, seed, 1000);
  dob.reset();
  dobb.reset();
  checkIdenticalOutput(dob, dobb, r, seed, 3000);
  dob.reset();
  dobb.reset();
  checkIdenticalOutput(dob, dobb, r, seed, 1000);
}

/** Writes {@code count} junk records into both sinks and asserts identical contents. */
private void checkIdenticalOutput(DataOutputBuffer dob, DataOutputByteBuffer dobb,
    Random r, long seed, int count) throws IOException {
  writeJunk(dob, r, seed, count);
  writeJunk(dobb, r, seed, count);
  byte[] check = toBytes(dobb.getData(), dobb.getLength());
  assertEquals("Failed Checking length = " + check.length, check.length, dob.getLength());
  assertArrayEquals(check, Arrays.copyOf(dob.getData(), dob.getLength()));
}

Class: org.apache.hadoop.io.TestIOUtils

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * IOUtils.skipFully skips exactly the requested bytes and raises an
 * EOFException - naming how many bytes it did manage to skip - when the
 * stream runs dry before the request is satisfied.
 */
@Test
public void testSkipFully() throws IOException {
  byte inArray[] = new byte[] {0, 1, 2, 3, 4};
  ByteArrayInputStream in = new ByteArrayInputStream(inArray);
  try {
    in.mark(inArray.length);
    IOUtils.skipFully(in, 2);
    IOUtils.skipFully(in, 2);
    try {
      IOUtils.skipFully(in, 2); // only 1 byte remains
      fail("expected to get a PrematureEOFException");
    } catch (EOFException e) {
      assertEquals("Premature EOF from inputStream "
          + "after skipping 1 byte(s).", e.getMessage());
    }
    in.reset();
    try {
      IOUtils.skipFully(in, 20); // far past the end
      fail("expected to get a PrematureEOFException");
    } catch (EOFException e) {
      assertEquals("Premature EOF from inputStream "
          + "after skipping 5 byte(s).", e.getMessage());
    }
    in.reset();
    IOUtils.skipFully(in, 5); // skipping exactly to EOF succeeds
    try {
      IOUtils.skipFully(in, 10);
      fail("expected to get a PrematureEOFException");
    } catch (EOFException e) {
      assertEquals("Premature EOF from inputStream "
          + "after skipping 0 byte(s).", e.getMessage());
    }
  } finally {
    in.close();
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * wrappedReadForCompressedData passes through a successful read's byte count
 * and converts a decompressor InternalError into an IOException.
 */
@Test
public void testWrappedReadForCompressedData() throws IOException {
  byte[] buf = new byte[2];
  InputStream mockStream = Mockito.mock(InputStream.class);
  Mockito.when(mockStream.read(buf, 0, 1)).thenReturn(1);
  Mockito.when(mockStream.read(buf, 0, 2)).thenThrow(new java.lang.InternalError());
  try {
    assertEquals("Check expected value", 1,
        IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 1));
  } catch (IOException ioe) {
    fail("Unexpected error while reading");
  }
  try {
    IOUtils.wrappedReadForCompressedData(mockStream, buf, 0, 2);
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Error while reading compressed data", ioe);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * copyBytes with close=true must propagate an exception thrown while closing
 * the output stream - and still close both streams.
 */
@Test
public void testCopyBytesWithCountShouldThrowOutTheStreamClosureExceptions() throws Exception {
  InputStream inputStream = Mockito.mock(InputStream.class);
  OutputStream outputStream = Mockito.mock(OutputStream.class);
  Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
  Mockito.doThrow(new IOException("Exception in closing the stream"))
      .when(outputStream).close();
  try {
    IOUtils.copyBytes(inputStream, outputStream, (long) 1, true);
    fail("Should throw out the exception");
  } catch (IOException e) {
    assertEquals("Not throwing the expected exception.",
        "Exception in closing the stream", e.getMessage());
  }
  Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
  Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}

IterativeVerifier EqualityVerifier PublicFieldVerifier 
/**
 * IOUtils.writeFully writes a full ByteBuffer to a FileChannel, both from the
 * current channel position and at an explicit offset - and the explicit-offset
 * write must not disturb the bytes before that offset.
 * Switched to try-with-resources: the original leaked the RandomAccessFile
 * whenever an assertion failed before the explicit close().
 */
@Test
public void testWriteFully() throws IOException {
  final int INPUT_BUFFER_LEN = 10000;
  final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
  byte[] input = new byte[INPUT_BUFFER_LEN];
  for (int i = 0; i < input.length; i++) {
    input[i] = (byte) (i & 0xff);
  }
  byte[] output = new byte[input.length];
  try {
    try (RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw")) {
      FileChannel fc = raf.getChannel();
      ByteBuffer buf = ByteBuffer.wrap(input);
      IOUtils.writeFully(fc, buf);
      raf.seek(0);
      raf.read(output);
      for (int i = 0; i < input.length; i++) {
        assertEquals(input[i], output[i]);
      }
      // Write the same buffer again at an absolute offset; the first
      // HALFWAY bytes of the file must be unchanged.
      buf.rewind();
      IOUtils.writeFully(fc, buf, HALFWAY);
      for (int i = 0; i < HALFWAY; i++) {
        assertEquals(input[i], output[i]);
      }
      raf.seek(0);
      raf.read(output);
      for (int i = HALFWAY; i < input.length; i++) {
        assertEquals(input[i - HALFWAY], output[i]);
      }
    }
  } finally {
    File f = new File(TEST_FILE_NAME);
    if (f.exists()) {
      f.delete();
    }
  }
}

Class: org.apache.hadoop.io.TestMapFile

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * MapFile.Writer construction surfaces the IOException thrown by a failing
 * mkdirs on the (spied) target filesystem.
 * Fixed assertEquals argument ordering: JUnit's contract is
 * (message, expected, actual); the original had expected and actual swapped,
 * which produces a misleading failure message.
 */
@Test
public void testPathExplosionWriterCreation() {
  Path path = new Path(TEST_DIR, "testPathExplosionWriterCreation.mapfile");
  String TEST_ERROR_MESSAGE = "Mkdirs failed to create directory " + path.getName();
  MapFile.Writer writer = null;
  try {
    FileSystem fsSpy = spy(FileSystem.get(conf));
    Path pathSpy = spy(path);
    when(fsSpy.mkdirs(path)).thenThrow(new IOException(TEST_ERROR_MESSAGE));
    when(pathSpy.getFileSystem(conf)).thenReturn(fsSpy);
    writer = new MapFile.Writer(conf, pathSpy,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(IntWritable.class));
    fail("fail in testPathExplosionWriterCreation !!!");
  } catch (IOException ex) {
    assertEquals("testPathExplosionWriterCreation ex message error !!!",
        TEST_ERROR_MESSAGE, ex.getMessage());
  } catch (Exception e) {
    fail("fail in testPathExplosionWriterCreation. Other ex !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

APIUtilityVerifier EqualityVerifier 
/** midKey of a 10-entry MapFile (keys 0..9) is key (SIZE-1)/2, i.e. 4. */
@Test
public void testMidKeyOnCurrentApi() throws Exception {
  final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_PREFIX, IntWritable.class, IntWritable.class);
    int SIZE = 10;
    for (int i = 0; i < SIZE; i++) {
      writer.append(new IntWritable(i), new IntWritable(i));
    }
    writer.close();
    reader = createReader(TEST_PREFIX, IntWritable.class);
    assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * MapFile.Reader.getClosest over keys "1","11",..,"91": a missing key
 * returns the next key (or the previous with before=true), an exact key
 * returns itself, a below-range key snaps to the first key, and an
 * above-range key yields null (or the last key with before=true).
 * Fixed the exact-match check: the original asserted explicitKey against
 * itself (trivially true) instead of against the returned closest key.
 */
@Test
public void testGetClosestOnCurrentApi() throws Exception {
  final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_PREFIX, Text.class, Text.class);
    int FIRST_KEY = 1;
    for (int i = FIRST_KEY; i < 100; i += 10) {
      Text t = new Text(Integer.toString(i));
      writer.append(t, t);
    }
    writer.close();

    reader = createReader(TEST_PREFIX, Text.class);
    Text key = new Text("55");
    Text value = new Text();
    Text closest = (Text) reader.getClosest(key, value);
    assertEquals(new Text("61"), closest); // next key after "55" (string order)
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("51"), closest); // previous key before "55"

    // An exact key must be returned as-is.
    final Text explicitKey = new Text("21");
    closest = (Text) reader.getClosest(explicitKey, value);
    assertEquals(new Text("21"), closest);

    // Below the first key: snaps to the first key.
    key = new Text("00");
    closest = (Text) reader.getClosest(key, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));

    // Above the last key: null, unless before=true yields the last key.
    key = new Text("92");
    closest = (Text) reader.getClosest(key, value);
    assertNull("Not null key in testGetClosestWithNewCode", closest);
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("91"), closest);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * MapFile.Reader.finalKey fills in the last key of the file - 9 after
 * appending keys 0..9.
 * Renamed the holder variable (it receives the ACTUAL final key, not an
 * expectation) and fixed assertEquals ordering to (message, expected, actual).
 */
@Test
public void testOnFinalKey() {
  final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
  int SIZE = 10;
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = createWriter(TEST_METHOD_KEY, IntWritable.class, IntWritable.class);
    for (int i = 0; i < SIZE; i++) {
      writer.append(new IntWritable(i), new IntWritable(i));
    }
    writer.close();

    reader = createReader(TEST_METHOD_KEY, IntWritable.class);
    IntWritable actualFinalKey = new IntWritable(0);
    reader.finalKey(actualFinalKey);
    assertEquals("testOnFinalKey not same !!!", new IntWritable(9), actualFinalKey);
  } catch (IOException ex) {
    fail("testOnFinalKey error !!!");
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * MapFile.rename surfaces the IOException raised by the underlying
 * FileSystem rename (stubbed on a spy to fail).
 * Fixed assertEquals argument ordering to (message, expected, actual);
 * the original had expected and actual swapped.
 */
@Test
public void testRenameWithException() {
  final String ERROR_MESSAGE = "Can't rename file";
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    FileSystem spyFs = spy(fs);
    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();
    Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
    Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
    when(spyFs.rename(oldDir, newDir)).thenThrow(new IOException(ERROR_MESSAGE));
    MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
    fail("testRenameWithException no exception error !!!");
  } catch (IOException ex) {
    assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
        ERROR_MESSAGE, ex.getMessage());
  } finally {
    IOUtils.cleanup(null, writer);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes a single entry via the deprecated Writer constructor and checks
 * that {@code midKey()} returns that entry's key.
 */
@Test
@SuppressWarnings("deprecation")
public void testMidKey() throws Exception {
  Path dir = new Path(TEST_DIR, "testMidKey.mapfile");
  FileSystem localFs = FileSystem.getLocal(conf);
  Path qualified = localFs.makeQualified(dir);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, localFs, qualified.toString(),
        IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(1), new IntWritable(1));
    writer.close();
    reader = new MapFile.Reader(qualified, conf);
    // With exactly one entry the middle key is that entry.
    assertEquals(new IntWritable(1), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test getClosest feature.
 * Keys "10".."90" (step 10) are written with an index interval of 3, then
 * probed at absent, exact-match, below-range and above-range positions.
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception {
  Path dir = new Path(TEST_DIR, "testGetClosest.mapfile");
  FileSystem localFs = FileSystem.getLocal(conf);
  Path qualified = localFs.makeQualified(dir);
  MapFile.Writer.setIndexInterval(conf, 3);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, localFs, qualified.toString(),
        Text.class, Text.class);
    assertEquals(3, writer.getIndexInterval());
    final int FIRST_KEY = 10;
    for (int k = FIRST_KEY; k < 100; k += 10) {
      String digits = Integer.toString(k);
      // Left-pad to two characters (a no-op for these two-digit keys).
      Text t = new Text("00".substring(digits.length()) + digits);
      writer.append(t, t);
    }
    writer.close();

    reader = new MapFile.Reader(qualified, conf);
    Text probe = new Text("55");
    Text value = new Text();

    // "55" is absent: next key after it is "60", previous is "50".
    Text closest = (Text) reader.getClosest(probe, value);
    assertEquals(new Text("60"), closest);
    closest = (Text) reader.getClosest(probe, value, true);
    assertEquals(new Text("50"), closest);

    // An exact match is returned in either direction.
    final Text TWENTY = new Text("20");
    closest = (Text) reader.getClosest(TWENTY, value);
    assertEquals(TWENTY, closest);
    closest = (Text) reader.getClosest(TWENTY, value, true);
    assertEquals(TWENTY, closest);

    // Before the first key: forward search finds the first key,
    // backward search finds nothing.
    probe = new Text("00");
    closest = (Text) reader.getClosest(probe, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
    closest = (Text) reader.getClosest(probe, value, true);
    assertNull(closest);

    // After the last key: forward search finds nothing,
    // backward search finds the last key.
    probe = new Text("99");
    closest = (Text) reader.getClosest(probe, value);
    assertNull(closest);
    closest = (Text) reader.getClosest(probe, value, true);
    assertEquals(new Text("90"), closest);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies that {@code midKey()} on an empty MapFile returns null.
 */
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception {
  Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  MapFile.Writer writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
      IntWritable.class, IntWritable.class);
  writer.close();
  MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
  try {
    // IDIOM FIX: assertNull states the intent directly and reports
    // failures more clearly than assertEquals(null, ...).
    assertNull(reader.midKey());
  } finally {
    reader.close();
  }
}

Class: org.apache.hadoop.io.TestSortedMapWritable

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * putAll must copy both the entries and the key/value class metadata
 * from the source map.
 */
@Test(timeout = 1000)
public void testPutAll() {
  SortedMapWritable source = new SortedMapWritable();
  SortedMapWritable target = new SortedMapWritable();
  source.put(new Text("key"), new Text("value"));
  target.putAll(source);
  assertEquals("map1 entries don't match map2 entries", source, target);
  boolean copiedClassInfo = target.classToIdMap.containsKey(Text.class)
      && target.idToClassMap.containsValue(Text.class);
  assertTrue("map2 doesn't have class information from map1", copiedClassInfo);
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips entries through the SortedMapWritable copy constructor,
 * both for a flat map and for a map whose values are themselves maps.
 */
@Test
@SuppressWarnings("unchecked")
public void testSortedMapWritable() {
  Text[] keys = { new Text("key1"), new Text("key2"), new Text("key3") };
  BytesWritable[] values = {
      new BytesWritable("value1".getBytes()),
      new BytesWritable("value2".getBytes()),
      new BytesWritable("value3".getBytes()) };

  SortedMapWritable inMap = new SortedMapWritable();
  for (int i = 0; i < keys.length; i++) {
    inMap.put(keys[i], values[i]);
  }
  // Sorted semantics: first/last keys follow the key ordering.
  assertEquals(0, inMap.firstKey().compareTo(keys[0]));
  assertEquals(0, inMap.lastKey().compareTo(keys[2]));

  // The copy constructor preserves every entry.
  SortedMapWritable outMap = new SortedMapWritable(inMap);
  assertEquals(inMap.size(), outMap.size());
  for (Map.Entry e : inMap.entrySet()) {
    assertTrue(outMap.containsKey(e.getKey()));
    assertEquals(0,
        ((WritableComparable) outMap.get(e.getKey())).compareTo(e.getValue()));
  }

  // Nested maps must survive copying as well.
  Text[] maps = { new Text("map1"), new Text("map2") };
  SortedMapWritable mapOfMaps = new SortedMapWritable();
  mapOfMaps.put(maps[0], inMap);
  mapOfMaps.put(maps[1], outMap);
  SortedMapWritable copyOfMapOfMaps = new SortedMapWritable(mapOfMaps);
  for (int i = 0; i < maps.length; i++) {
    assertTrue(copyOfMapOfMaps.containsKey(maps[i]));
    SortedMapWritable original = (SortedMapWritable) mapOfMaps.get(maps[i]);
    SortedMapWritable copied = (SortedMapWritable) copyOfMapOfMaps.get(maps[i]);
    assertEquals(original.size(), copied.size());
    for (Writable key : original.keySet()) {
      assertTrue(copied.containsKey(key));
      WritableComparable aValue = (WritableComparable) original.get(key);
      WritableComparable bValue = (WritableComparable) copied.get(key);
      assertEquals(0, aValue.compareTo(bValue));
    }
  }
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Tests if equal and hashCode method still hold the contract.
 * Covers: null comparison, empty-map equality, different data, same data
 * inserted in different order, and swapped key/value pairings.
 */
@Test
public void testEqualsAndHashCode() {
  String failureReason;
  SortedMapWritable mapA = new SortedMapWritable();
  SortedMapWritable mapB = new SortedMapWritable();

  failureReason = "SortedMapWritable couldn't be initialized. Got null reference";
  assertNotNull(failureReason, mapA);
  assertNotNull(failureReason, mapB);

  // Basic contract on empty maps.
  assertFalse("equals method returns true when passed null", mapA.equals(null));
  assertTrue("Two empty SortedMapWritables are no longer equal",
      mapA.equals(mapB));

  Text[] keys = { new Text("key1"), new Text("key2") };
  BytesWritable[] values = {
      new BytesWritable("value1".getBytes()),
      new BytesWritable("value2".getBytes()) };

  // Disjoint contents -> unequal, and (for these inputs) different hashes.
  mapA.put(keys[0], values[0]);
  mapB.put(keys[1], values[1]);
  failureReason = "Two SortedMapWritables with different data are now equal";
  assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
  assertTrue(failureReason, !mapA.equals(mapB));
  assertTrue(failureReason, !mapB.equals(mapA));

  // Same entry sets, inserted in opposite order -> equal and same hash.
  mapA.put(keys[1], values[1]);
  mapB.put(keys[0], values[0]);
  failureReason =
      "Two SortedMapWritables with same entry sets formed in different order are now different";
  assertEquals(failureReason, mapA.hashCode(), mapB.hashCode());
  assertTrue(failureReason, mapA.equals(mapB));
  assertTrue(failureReason, mapB.equals(mapA));

  // Swap the values under each key -> unequal again.
  mapA.put(keys[0], values[1]);
  mapA.put(keys[1], values[0]);
  failureReason = "Two SortedMapWritables with different content are now equal";
  assertTrue(failureReason, mapA.hashCode() != mapB.hashCode());
  assertTrue(failureReason, !mapA.equals(mapB));
  assertTrue(failureReason, !mapB.equals(mapA));
}

InternalCallVerifier EqualityVerifier 
/**
 * Test that number of "unknown" classes is propagated across multiple copies.
 */
@Test
@SuppressWarnings("deprecation")
public void testForeignClass() {
  SortedMapWritable original = new SortedMapWritable();
  original.put(new Text("key"), new UTF8("value"));
  original.put(new Text("key2"), new UTF8("value2"));
  SortedMapWritable copy = new SortedMapWritable(original);
  SortedMapWritable copyOfCopy = new SortedMapWritable(copy);
  // Exactly one non-predefined class (UTF8) should survive both copies.
  assertEquals(1, copyOfCopy.getNewClasses());
}

Class: org.apache.hadoop.io.compress.TestCodec

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Writes a message through java.util.zip's GZIPOutputStream and reads it
 * back via the Hadoop gzip codec (native zlib disabled), verifying both
 * the decompressor type and the round-tripped content.
 */
@Test
public void testGzipCodecRead() throws IOException {
  // Force the pure-Java zlib implementation.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);

  // Produce a .gz file with the JDK's own gzip writer.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path gzFile = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(gzFile.toString()))));
  final String msg = "This is the message in the file!";
  out.write(msg);
  out.close();

  // Read it back through the codec selected by file extension.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(gzFile);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream in = fs.open(gzFile);
  in = codec.createInputStream(in, decompressor);
  BufferedReader reader = new BufferedReader(new InputStreamReader(in));
  String line = reader.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  reader.close();
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Compresses a random buffer with the JDK's GZIPOutputStream and verifies
 * the Hadoop GzipCodec (built-in, non-native decompressor) reproduces it.
 */
@Test
public void testGzipCompatibility() throws IOException {
  Random rng = new Random();
  long seed = rng.nextLong();
  rng.setSeed(seed);
  LOG.info("seed: " + seed); // logged so a failure is reproducible

  // Compress a random payload (0 .. 128KiB) with the JDK gzip writer.
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
  byte[] payload = new byte[rng.nextInt(128 * 1024 + 1)];
  rng.nextBytes(payload);
  gzout.write(payload);
  gzout.close();

  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());

  // Decompress through GzipCodec with native libs disabled.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(BuiltInGzipDecompressor.class, decom.getClass());
  InputStream gzin = codec.createInputStream(gzbuf, decom);

  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] roundTripped = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(payload, roundTripped);
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Streams ~4GiB of '\0' characters through the gzip codec to exercise
 * long/int overflow handling in the built-in decompressor.
 */
@Test
public void testGzipLongOverflow() throws IOException {
  LOG.info("testGzipLongOverflow");

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);

  // Write NBUF megabyte-sized buffers of zeros through a gzip stream.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipLongOverflow.bin.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final int NBUF = 1024 * 4 + 1;
  final char[] buf = new char[1024 * 1024];
  for (int i = 0; i < buf.length; i++) {
    buf[i] = '\0';
  }
  for (int i = 0; i < NBUF; i++) {
    bw.write(buf);
  }
  bw.close();

  // Read everything back and verify every character.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  for (int j = 0; j < NBUF; j++) {
    int n = br.read(buf);
    // BUG FIX: expected value first, per assertEquals(message, expected, actual),
    // so failures report correctly (both assertions below were swapped).
    // NOTE(review): BufferedReader.read(char[]) may legally return fewer chars
    // than requested; this test assumes full reads — confirm if it flakes.
    assertEquals("got wrong read length!", buf.length, n);
    for (int i = 0; i < buf.length; i++) {
      assertEquals("got wrong byte!", '\0', buf[i]);
    }
  }
  br.close();
}

Class: org.apache.hadoop.io.compress.TestCodecPool

EqualityVerifier 
/**
 * Leased-compressor accounting: the count rises with each getCompressor
 * and falls back to zero as the instances are returned.
 */
@Test(timeout = 1000)
public void testCompressorPoolCounts() {
  Compressor first = CodecPool.getCompressor(codec);
  Compressor second = CodecPool.getCompressor(codec);
  assertEquals(LEASE_COUNT_ERR, 2, CodecPool.getLeasedCompressorsCount(codec));
  CodecPool.returnCompressor(second);
  assertEquals(LEASE_COUNT_ERR, 1, CodecPool.getLeasedCompressorsCount(codec));
  CodecPool.returnCompressor(first);
  assertEquals(LEASE_COUNT_ERR, 0, CodecPool.getLeasedCompressorsCount(codec));
}

EqualityVerifier 
/**
 * Leased-decompressor accounting: mirror of testCompressorPoolCounts for
 * the decompressor side of the pool.
 */
@Test(timeout = 1000)
public void testDecompressorPoolCounts() {
  Decompressor first = CodecPool.getDecompressor(codec);
  Decompressor second = CodecPool.getDecompressor(codec);
  assertEquals(LEASE_COUNT_ERR, 2, CodecPool.getLeasedDecompressorsCount(codec));
  CodecPool.returnDecompressor(second);
  assertEquals(LEASE_COUNT_ERR, 1, CodecPool.getLeasedDecompressorsCount(codec));
  CodecPool.returnDecompressor(first);
  assertEquals(LEASE_COUNT_ERR, 0, CodecPool.getLeasedDecompressorsCount(codec));
}

Class: org.apache.hadoop.io.compress.lz4.TestLz4CompressorDecompressor

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * LZ4 round-trip: compress a generated buffer, decompress it, and check
 * the byte counters, the finished flag, and the restored content.
 */
@Test
public void testCompressDecompress() {
  int BYTE_SIZE = 1024 * 54;
  byte[] input = generate(BYTE_SIZE);
  Lz4Compressor compressor = new Lz4Compressor();
  try {
    compressor.setInput(input, 0, input.length);
    assertTrue("Lz4CompressDecompress getBytesRead error !!!",
        compressor.getBytesRead() > 0);
    assertTrue("Lz4CompressDecompress getBytesWritten before compress error !!!",
        compressor.getBytesWritten() == 0);

    byte[] compressed = new byte[BYTE_SIZE];
    int compressedSize = compressor.compress(compressed, 0, compressed.length);
    assertTrue("Lz4CompressDecompress getBytesWritten after compress error !!!",
        compressor.getBytesWritten() > 0);

    Lz4Decompressor decompressor = new Lz4Decompressor();
    decompressor.setInput(compressed, 0, compressedSize);
    byte[] restored = new byte[BYTE_SIZE];
    decompressor.decompress(restored, 0, restored.length);
    assertTrue("testLz4CompressDecompress finished error !!!",
        decompressor.finished());
    assertArrayEquals(input, restored);

    compressor.reset();
    decompressor.reset();
    assertTrue("decompressor getRemaining error !!!",
        decompressor.getRemaining() == 0);
  } catch (Exception e) {
    fail("testLz4CompressDecompress ex error!!!");
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a 100KiB buffer through BlockCompressorStream /
 * BlockDecompressorStream backed by the LZ4 (de)compressor.
 */
@Test
public void testCompressorDecopressorLogicWithCompressionStreams() {
  DataOutputStream deflateOut = null;
  DataInputStream inflateIn = null;
  int BYTE_SIZE = 1024 * 100;
  byte[] bytes = generate(BYTE_SIZE);
  int bufferSize = 262144;
  int compressionOverhead = (bufferSize / 6) + 32;
  try {
    // Compress into an in-memory buffer.
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter = new BlockCompressorStream(
        compressedDataBuffer, new Lz4Compressor(bufferSize), bufferSize,
        compressionOverhead);
    deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes, 0, bytes.length);
    deflateOut.flush();
    deflateFilter.finish();

    // Decompress from that buffer.
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
        compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter = new BlockDecompressorStream(
        deCompressedDataBuffer, new Lz4Decompressor(bufferSize), bufferSize);
    inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));

    byte[] result = new byte[BYTE_SIZE];
    // BUG FIX: read() may return fewer bytes than requested; readFully
    // guarantees the whole buffer is populated (or throws EOFException,
    // which the catch below turns into a test failure).
    inflateIn.readFully(result);
    assertArrayEquals("original array not equals compress/decompressed array",
        result, bytes);
  } catch (IOException e) {
    fail("testLz4CompressorDecopressorLogicWithCompressionStreams ex error !!!");
  } finally {
    try {
      if (deflateOut != null)
        deflateOut.close();
      if (inflateIn != null)
        inflateIn.close();
    } catch (Exception e) {
    }
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * An empty LZ4 block stream must compress to exactly the 4-byte length
 * header, and decompressing it must immediately report end-of-stream.
 */
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
  ByteArrayInputStream bytesIn = null;
  ByteArrayOutputStream bytesOut = null;
  byte[] buf = null;
  BlockDecompressorStream blockDecompressorStream = null;
  try {
    // Compress nothing.
    bytesOut = new ByteArrayOutputStream();
    BlockCompressorStream blockCompressorStream =
        new BlockCompressorStream(bytesOut, new Lz4Compressor(), 1024, 0);
    blockCompressorStream.close();

    // Only the 4-byte block-length header should have been written.
    buf = bytesOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, buf.length);

    // Decompressing yields EOF straight away.
    bytesIn = new ByteArrayInputStream(buf);
    blockDecompressorStream =
        new BlockDecompressorStream(bytesIn, new Lz4Decompressor(), 1024);
    assertEquals("return value is not -1", -1, blockDecompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
        + e.getMessage());
  } finally {
    if (blockDecompressorStream != null)
      try {
        bytesIn.close();
        bytesOut.close();
        blockDecompressorStream.close();
      } catch (IOException e) {
      }
  }
}

Class: org.apache.hadoop.io.compress.snappy.TestSnappyCompressorDecompressor

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Snappy round-trip: compress a generated buffer, decompress it, and check
 * byte counters, the finished flag, and the restored content.
 */
@Test
public void testSnappyCompressDecompress() {
  int BYTE_SIZE = 1024 * 54;
  byte[] input = BytesGenerator.get(BYTE_SIZE);
  SnappyCompressor compressor = new SnappyCompressor();
  try {
    compressor.setInput(input, 0, input.length);
    assertTrue("SnappyCompressDecompress getBytesRead error !!!",
        compressor.getBytesRead() > 0);
    assertTrue(
        "SnappyCompressDecompress getBytesWritten before compress error !!!",
        compressor.getBytesWritten() == 0);

    byte[] compressed = new byte[BYTE_SIZE];
    int compressedSize = compressor.compress(compressed, 0, compressed.length);
    assertTrue(
        "SnappyCompressDecompress getBytesWritten after compress error !!!",
        compressor.getBytesWritten() > 0);

    SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
    decompressor.setInput(compressed, 0, compressedSize);
    byte[] restored = new byte[BYTE_SIZE];
    decompressor.decompress(restored, 0, restored.length);
    assertTrue("testSnappyCompressDecompress finished error !!!",
        decompressor.finished());
    Assert.assertArrayEquals(input, restored);

    compressor.reset();
    decompressor.reset();
    assertTrue("decompressor getRemaining error !!!",
        decompressor.getRemaining() == 0);
  } catch (Exception e) {
    fail("testSnappyCompressDecompress ex error!!!");
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Round-trips a 100KiB buffer through BlockCompressorStream /
 * BlockDecompressorStream backed by the Snappy (de)compressor.
 */
@Test
public void testSnappyCompressorDecopressorLogicWithCompressionStreams() {
  int BYTE_SIZE = 1024 * 100;
  byte[] bytes = BytesGenerator.get(BYTE_SIZE);
  int bufferSize = 262144;
  int compressionOverhead = (bufferSize / 6) + 32;
  DataOutputStream deflateOut = null;
  DataInputStream inflateIn = null;
  try {
    // Compress into an in-memory buffer.
    DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
    CompressionOutputStream deflateFilter = new BlockCompressorStream(
        compressedDataBuffer, new SnappyCompressor(bufferSize), bufferSize,
        compressionOverhead);
    deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(bytes, 0, bytes.length);
    deflateOut.flush();
    deflateFilter.finish();

    // Decompress from that buffer.
    DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
    deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
        compressedDataBuffer.getLength());
    CompressionInputStream inflateFilter = new BlockDecompressorStream(
        deCompressedDataBuffer, new SnappyDecompressor(bufferSize), bufferSize);
    inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));

    byte[] result = new byte[BYTE_SIZE];
    // BUG FIX: read() may return fewer bytes than requested; readFully
    // guarantees the whole buffer is populated (or throws EOFException,
    // which the catch below turns into a test failure).
    inflateIn.readFully(result);
    Assert.assertArrayEquals(
        "original array not equals compress/decompressed array", result, bytes);
  } catch (IOException e) {
    fail("testSnappyCompressorDecopressorLogicWithCompressionStreams ex error !!!");
  } finally {
    try {
      if (deflateOut != null)
        deflateOut.close();
      if (inflateIn != null)
        inflateIn.close();
    } catch (Exception e) {
    }
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * An empty Snappy block stream must compress to exactly the 4-byte length
 * header, and decompressing it must immediately report end-of-stream.
 */
@Test
public void testCompressorDecompressorEmptyStreamLogic() {
  ByteArrayInputStream bytesIn = null;
  ByteArrayOutputStream bytesOut = null;
  byte[] buf = null;
  BlockDecompressorStream blockDecompressorStream = null;
  try {
    // Compress nothing.
    bytesOut = new ByteArrayOutputStream();
    BlockCompressorStream blockCompressorStream =
        new BlockCompressorStream(bytesOut, new SnappyCompressor(), 1024, 0);
    blockCompressorStream.close();

    // Only the 4-byte block-length header should have been written.
    buf = bytesOut.toByteArray();
    assertEquals("empty stream compressed output size != 4", 4, buf.length);

    // Decompressing yields EOF straight away.
    bytesIn = new ByteArrayInputStream(buf);
    blockDecompressorStream =
        new BlockDecompressorStream(bytesIn, new SnappyDecompressor(), 1024);
    assertEquals("return value is not -1", -1, blockDecompressorStream.read());
  } catch (Exception e) {
    fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
        + e.getMessage());
  } finally {
    if (blockDecompressorStream != null)
      try {
        bytesIn.close();
        bytesOut.close();
        blockDecompressorStream.close();
      } catch (IOException e) {
      }
  }
}

Class: org.apache.hadoop.io.compress.zlib.TestZlibCompressorDecompressor

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Zlib round-trip: compress a 64KiB generated buffer, verify the byte
 * counters and that compression actually shrank it, then decompress and
 * compare against the original.
 */
@Test
public void testZlibCompressDecompress() {
  byte[] rawData = null;
  int rawDataSize = 0;
  rawDataSize = 1024 * 64;
  rawData = generate(rawDataSize);
  try {
    ZlibCompressor compressor = new ZlibCompressor();
    ZlibDecompressor decompressor = new ZlibDecompressor();
    assertFalse("testZlibCompressDecompress finished error",
        compressor.finished());

    compressor.setInput(rawData, 0, rawData.length);
    assertTrue("testZlibCompressDecompress getBytesRead before error",
        compressor.getBytesRead() == 0);
    compressor.finish();

    byte[] compressedResult = new byte[rawDataSize];
    int compressedSize = compressor.compress(compressedResult, 0, rawDataSize);
    assertTrue("testZlibCompressDecompress getBytesRead ather error",
        compressor.getBytesRead() == rawDataSize);
    assertTrue(
        "testZlibCompressDecompress compressed size no less then original size",
        compressedSize < rawDataSize);

    decompressor.setInput(compressedResult, 0, compressedSize);
    byte[] decompressedBytes = new byte[rawDataSize];
    decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
    assertArrayEquals("testZlibCompressDecompress arrays not equals ",
        rawData, decompressedBytes);

    compressor.reset();
    decompressor.reset();
  } catch (IOException ex) {
    fail("testZlibCompressDecompress ex !!!" + ex);
  }
}

Class: org.apache.hadoop.io.file.tfile.TestTFileByteArrays

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises Scanner.locate() at block-interior and block-boundary keys;
 * a key that does not exist must land at the scanner's end location.
 */
@Test
public void testLocate() throws IOException {
  if (skip)
    return;
  writeRecords(3 * records1stBlock);
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();
  // Interior of the first block, last record of it, and first of the next.
  locate(scanner, composeSortedKey(KEY, 2).getBytes());
  locate(scanner, composeSortedKey(KEY, records1stBlock - 1).getBytes());
  locate(scanner, composeSortedKey(KEY, records1stBlock).getBytes());
  // "keyX" was never written.
  Location missing = locate(scanner, "keyX".getBytes());
  Assert.assertEquals(scanner.endLocation, missing);
  scanner.close();
  reader.close();
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A scanner entry's value may be consumed once; a second getValue() on
 * the same entry must fail.
 */
@Test
public void testFailureReadValueManyTimes() throws IOException {
  if (skip)
    return;
  writeRecords(5);
  Reader reader =
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
  Scanner scanner = reader.createScanner();

  // First read succeeds and yields the expected value.
  byte[] valueBuf = new byte[BUF_SIZE];
  int valueLen = scanner.entry().getValueLength();
  scanner.entry().getValue(valueBuf);
  Assert.assertEquals(new String(valueBuf, 0, valueLen), VALUE + 0);

  // Second read of the same entry must throw.
  try {
    scanner.entry().getValue(valueBuf);
    Assert.fail("Cannot get the value mlutiple times.");
  } catch (Exception e) {
    // expected
  }
  scanner.close();
  reader.close();
}

Class: org.apache.hadoop.io.file.tfile.TestTFileComparator2

IterativeVerifier EqualityVerifier 
/**
 * Writes cubed LongWritable keys (which sort correctly only under the
 * jClassLongWritableComparator) and verifies a full scan returns the
 * values in insertion order.
 */
@Test
public void testSortedLongWritable() throws IOException {
  Configuration conf = new Configuration();
  Path path = new Path(ROOT, name);
  FileSystem fs = path.getFileSystem(conf);

  // Write phase: keys are cube(i - NENTRY/2), values are buildValue(i).
  FSDataOutputStream out = fs.create(path);
  try {
    TFile.Writer writer =
        new Writer(out, BLOCK_SIZE, "gz", jClassLongWritableComparator, conf);
    try {
      LongWritable key = new LongWritable(0);
      for (long i = 0; i < NENTRY; ++i) {
        key.set(cube(i - NENTRY / 2));
        DataOutputStream dos = writer.prepareAppendKey(-1);
        try {
          key.write(dos);
        } finally {
          dos.close();
        }
        dos = writer.prepareAppendValue(-1);
        try {
          dos.write(buildValue(i).getBytes());
        } finally {
          dos.close();
        }
      }
    } finally {
      writer.close();
    }
  } finally {
    out.close();
  }

  // Read phase: scan and check every value in order.
  FSDataInputStream in = fs.open(path);
  try {
    TFile.Reader reader =
        new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
    try {
      TFile.Reader.Scanner scanner = reader.createScanner();
      long i = 0;
      BytesWritable value = new BytesWritable();
      for (; !scanner.atEnd(); scanner.advance()) {
        scanner.entry().getValue(value);
        assertEquals(buildValue(i),
            new String(value.getBytes(), 0, value.getLength()));
        ++i;
      }
    } finally {
      reader.close();
    }
  } finally {
    in.close();
  }
}

Class: org.apache.hadoop.io.nativeio.TestNativeIO

UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test basic chmod operation: a missing path fails with ENOENT, and each
 * mode applied to a real directory is reflected back by the filesystem.
 */
@Test(timeout = 30000)
public void testChmod() throws Exception {
  if (Path.WINDOWS) {
    return; // POSIX-only behavior
  }
  // Nonexistent path -> ENOENT.
  try {
    NativeIO.POSIX.chmod("/this/file/doesnt/exist", 777);
    fail("Chmod of non-existent file didn't fail");
  } catch (NativeIOException nioe) {
    assertEquals(Errno.ENOENT, nioe.getErrno());
  }

  File toChmod = new File(TEST_DIR, "testChmod");
  assertTrue("Create test subject", toChmod.exists() || toChmod.mkdir());

  // Apply a sequence of modes and verify each one sticks.
  NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0777);
  assertPermissions(toChmod, 0777);
  NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0000);
  assertPermissions(toChmod, 0000);
  NativeIO.POSIX.chmod(toChmod.getAbsolutePath(), 0644);
  assertPermissions(toChmod, 0644);
}

UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * O_CREAT creates a new file; O_CREAT|O_EXCL on an existing file must
 * fail with EEXIST.
 */
@Test(timeout = 30000)
public void testOpenWithCreate() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  LOG.info("Test creating a file with O_CREAT");
  FileDescriptor fd = NativeIO.POSIX.open(
      new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
      NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT, 0700);
  // BUG FIX: the original asserted assertNotNull(true), which can never
  // fail; the returned descriptor is what must be checked.
  assertNotNull(fd);
  assertTrue(fd.valid());
  FileOutputStream fos = new FileOutputStream(fd);
  fos.write("foo".getBytes());
  fos.close();
  // Closing the stream invalidates the descriptor.
  assertFalse(fd.valid());

  LOG.info("Test exclusive create");
  try {
    fd = NativeIO.POSIX.open(
        new File(TEST_DIR, "testWorkingOpen").getAbsolutePath(),
        NativeIO.POSIX.O_WRONLY | NativeIO.POSIX.O_CREAT | NativeIO.POSIX.O_EXCL,
        0700);
    fail("Was able to create existing file with O_EXCL");
  } catch (NativeIOException nioe) {
    LOG.info("Got expected exception for failed exclusive create", nioe);
    assertEquals(Errno.EEXIST, nioe.getErrno());
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test for races in fstat usage
 * NOTE: this test is likely to fail on RHEL 6.0 which has a non-threadsafe
 * implementation of getpwuid_r.
 *
 * Ten threads hammer getFstat on the same descriptor for 5 seconds; any
 * failure in a worker is captured and rethrown at the end.
 */
@Test(timeout = 30000)
public void testMultiThreadedFstat() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  final FileOutputStream fos =
      new FileOutputStream(new File(TEST_DIR, "testfstat"));
  // IDIOM FIX: parameterized types instead of raw AtomicReference/List.
  final AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();
  List<Thread> statters = new ArrayList<Thread>();
  for (int i = 0; i < 10; i++) {
    Thread statter = new Thread() {
      @Override
      public void run() {
        long et = Time.now() + 5000;
        while (Time.now() < et) {
          try {
            NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
            assertEquals(System.getProperty("user.name"), stat.getOwner());
            assertNotNull(stat.getGroup());
            assertTrue(!stat.getGroup().isEmpty());
            assertEquals("Stat mode field should indicate a regular file",
                NativeIO.POSIX.Stat.S_IFREG,
                stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
          } catch (Throwable t) {
            // Remember the failure; the main thread rethrows after join.
            thrown.set(t);
          }
        }
      }
    };
    statters.add(statter);
    statter.start();
  }
  for (Thread t : statters) {
    t.join();
  }
  fos.close();
  if (thrown.get() != null) {
    throw new RuntimeException(thrown.get());
  }
}

APIUtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * Writes a known buffer to a file, maps it, mlocks the mapping, and
 * verifies the mapped bytes sum to the same total as the source buffer
 * before munmapping.
 */
@Test(timeout = 10000)
public void testMlock() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final File TEST_FILE = new File(
      new File(System.getProperty("test.build.data", "build/test/data")),
      "testMlockFile");
  final int BUF_LEN = 12289;
  byte buf[] = new byte[BUF_LEN];
  int bufSum = 0;
  for (int i = 0; i < buf.length; i++) {
    buf[i] = (byte) (i % 60);
    bufSum += buf[i];
  }

  // Persist the buffer so it can be memory-mapped.
  FileOutputStream fos = new FileOutputStream(TEST_FILE);
  try {
    fos.write(buf);
    fos.getChannel().force(true);
  } finally {
    fos.close();
  }

  FileInputStream fis = null;
  FileChannel channel = null;
  try {
    fis = new FileInputStream(TEST_FILE);
    channel = fis.getChannel();
    long fileSize = channel.size();
    MappedByteBuffer mapbuf = channel.map(MapMode.READ_ONLY, 0, fileSize);

    // Lock the pages, then read every byte back through the mapping.
    NativeIO.POSIX.mlock(mapbuf, fileSize);
    int sum = 0;
    for (int i = 0; i < fileSize; i++) {
      sum += mapbuf.get(i);
    }
    assertEquals("Expected sums to be equal", bufSum, sum);
    NativeIO.POSIX.munmap(mapbuf);
  } finally {
    if (channel != null) {
      channel.close();
    }
    if (fis != null) {
      fis.close();
    }
  }
}

BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * NativeIO.renameTo: renaming a missing source fails (ENOENT on POSIX,
 * a specific message on Windows); self-rename and normal rename succeed;
 * renaming onto a path under a regular file fails with ENOTDIR.
 */
@Test(timeout = 30000)
public void testRenameTo() throws Exception {
  final File TEST_DIR = new File(
      new File(System.getProperty("test.build.data", "build/test/data")),
      "renameTest");
  assumeTrue(TEST_DIR.mkdirs());
  File nonExistentFile = new File(TEST_DIR, "nonexistent");
  File targetFile = new File(TEST_DIR, "target");

  // Missing source must fail, with a platform-specific signal.
  try {
    NativeIO.renameTo(nonExistentFile, targetFile);
    Assert.fail();
  } catch (NativeIOException e) {
    if (Path.WINDOWS) {
      Assert.assertEquals(
          String.format("The system cannot find the file specified.%n"),
          e.getMessage());
    } else {
      Assert.assertEquals(Errno.ENOENT, e.getErrno());
    }
  }

  // Self-rename and rename to a fresh target both succeed.
  File sourceFile = new File(TEST_DIR, "source");
  Assert.assertTrue(sourceFile.createNewFile());
  NativeIO.renameTo(sourceFile, sourceFile);
  NativeIO.renameTo(sourceFile, targetFile);

  // A target "under" a regular file is invalid.
  sourceFile = new File(TEST_DIR, "source");
  Assert.assertTrue(sourceFile.createNewFile());
  File badTarget = new File(targetFile, "subdir");
  try {
    NativeIO.renameTo(sourceFile, badTarget);
    Assert.fail();
  } catch (NativeIOException e) {
    if (Path.WINDOWS) {
      Assert.assertEquals(String.format("The parameter is incorrect.%n"),
          e.getMessage());
    } else {
      Assert.assertEquals(Errno.ENOTDIR, e.getErrno());
    }
  }
  FileUtils.deleteQuietly(TEST_DIR);
}

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * sync_file_range on a live descriptor succeeds (or the test is skipped
 * where unsupported); on the closed descriptor it must fail with EBADF.
 */
@Test(timeout = 30000)
public void testSyncFileRange() throws Exception {
  FileOutputStream fos =
      new FileOutputStream(new File(TEST_DIR, "testSyncFileRange"));
  try {
    fos.write("foo".getBytes());
    NativeIO.POSIX.sync_file_range(fos.getFD(), 0, 1024,
        NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
  } catch (UnsupportedOperationException uoe) {
    // Platform doesn't support sync_file_range: skip the test.
    assumeTrue(false);
  } finally {
    fos.close();
  }
  // The stream was closed above, so its FD is now invalid.
  try {
    NativeIO.POSIX.sync_file_range(fos.getFD(), 0, 1024,
        NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
    fail("Did not throw on bad file");
  } catch (NativeIOException nioe) {
    assertEquals(Errno.EBADF, nioe.getErrno());
  }
}

EqualityVerifier 
/**
 * getFstat on a closed file descriptor must fail with EBADF.
 */
@Test(timeout = 30000)
public void testFstatClosedFd() throws Exception {
  FileOutputStream fos = new FileOutputStream(new File(TEST_DIR, "testfstat2"));
  fos.close();
  try {
    NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
    // BUG FIX: the original had no fail() here, so the test silently
    // passed if getFstat did NOT throw on a closed descriptor.
    fail("Did not throw on closed descriptor, got: " + stat);
  } catch (NativeIOException nioe) {
    LOG.info("Got expected exception", nioe);
    assertEquals(Errno.EBADF, nioe.getErrno());
  }
}

UtilityVerifier AssumptionSetter EqualityVerifier HybridVerifier 
/**
 * posix_fadvise: succeeds (or is skipped) on a live descriptor, fails
 * with EBADF on a closed one, and NPEs on a null descriptor.
 */
@Test(timeout = 30000)
public void testPosixFadvise() throws Exception {
  if (Path.WINDOWS) {
    return; // POSIX-only API
  }
  FileInputStream fis = new FileInputStream("/dev/zero");
  try {
    NativeIO.POSIX.posix_fadvise(fis.getFD(), 0, 0,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
  } catch (UnsupportedOperationException uoe) {
    // Platform doesn't support fadvise: skip the test.
    assumeTrue(false);
  } catch (NativeIOException nioe) {
    // we should just skip the unit test on machines where we don't
    // have fadvise support
  } finally {
    fis.close();
  }
  // Closed descriptor -> EBADF.
  try {
    NativeIO.POSIX.posix_fadvise(fis.getFD(), 0, 1024,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
    fail("Did not throw on bad file");
  } catch (NativeIOException nioe) {
    assertEquals(Errno.EBADF, nioe.getErrno());
  }
  // Null descriptor -> NullPointerException.
  try {
    NativeIO.POSIX.posix_fadvise(null, 0, 1024,
        NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
    fail("Did not throw on null file");
  } catch (NullPointerException npe) {
    // expected
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getFstat on an open file must report the current user as owner (or
 * "Administrators" on Windows when the user is in that group), a non-empty
 * group, and a regular-file mode.
 */
@Test(timeout=30000) public void testFstat() throws Exception {
  FileOutputStream fos=new FileOutputStream(new File(TEST_DIR,"testfstat"));
  NativeIO.POSIX.Stat stat=NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));
  String owner=stat.getOwner();
  String expectedOwner=System.getProperty("user.name");
  if (Path.WINDOWS) {
    // On Windows, files created by a member of Administrators are owned by
    // the group rather than the individual user.
    UserGroupInformation ugi=UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString="Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner=adminsGroupString;
    }
  }
  assertEquals(expectedOwner,owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  // Mask off everything but the file-type bits before comparing.
  assertEquals("Stat mode field should indicate a regular file",NativeIO.POSIX.Stat.S_IFREG,stat.getMode() & NativeIO.POSIX.Stat.S_IFMT);
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Opening a nonexistent path without O_CREAT must fail with ENOENT.
 * POSIX-only behavior; skipped on Windows.
 */
@Test(timeout=30000) public void testOpenMissingWithoutCreate() throws Exception {
  if (Path.WINDOWS) {
    return;
  }
  LOG.info("Open a missing file without O_CREAT and it should fail");
  final String missingPath=new File(TEST_DIR,"doesntexist").getAbsolutePath();
  try {
    NativeIO.POSIX.open(missingPath,NativeIO.POSIX.O_WRONLY,0700);
    fail("Able to open a new file without O_CREAT");
  } catch ( NativeIOException expected) {
    LOG.info("Got expected exception",expected);
    assertEquals(Errno.ENOENT,expected.getErrno());
  }
}

Class: org.apache.hadoop.io.nativeio.TestSharedFileDescriptorFactory

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Writes one byte through an output stream layered over a shared file
 * descriptor and reads it back through the paired input stream.
 *
 * Fix: the original closed the streams and deleted the scratch directory
 * only on the success path, leaking the descriptor and the directory when
 * an assertion or I/O call failed.  Cleanup is now done in finally blocks.
 */
@Test(timeout=10000) public void testReadAndWrite() throws Exception {
  File path=new File(TEST_BASE,"testReadAndWrite");
  path.mkdirs();
  SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("woot_",new String[]{path.getAbsolutePath()});
  FileInputStream inStream=factory.createDescriptor("testReadAndWrite",4096);
  try {
    FileOutputStream outStream=new FileOutputStream(inStream.getFD());
    try {
      outStream.write(101);
      // Rewind the shared descriptor so the read sees the byte just written.
      inStream.getChannel().position(0);
      Assert.assertEquals(101,inStream.read());
    } finally {
      outStream.close();
    }
  } finally {
    inStream.close();
    FileUtil.fullyDelete(path);
  }
}

APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Factory creation must fail when every candidate directory is unusable,
 * and must fall back to the first usable directory otherwise.
 */
@Test(timeout=60000) public void testDirectoryFallbacks() throws Exception {
  File nonExistentPath=new File(TEST_BASE,"nonexistent");
  File permissionDeniedPath=new File("/");
  File goodPath=new File(TEST_BASE,"testDirectoryFallbacks");
  goodPath.mkdirs();
  String[] allUnusable={nonExistentPath.getAbsolutePath(),permissionDeniedPath.getAbsolutePath()};
  try {
    SharedFileDescriptorFactory.create("shm_",allUnusable);
    Assert.fail();
  } catch ( IOException e) {
    // expected: no candidate directory was usable
  }
  String[] withUsable={nonExistentPath.getAbsolutePath(),permissionDeniedPath.getAbsolutePath(),goodPath.getAbsolutePath()};
  SharedFileDescriptorFactory factory=SharedFileDescriptorFactory.create("shm_",withUsable);
  // The factory must have skipped the bad paths and settled on goodPath.
  Assert.assertEquals(goodPath.getAbsolutePath(),factory.getPath());
  FileUtil.fullyDelete(goodPath);
}

Class: org.apache.hadoop.io.retry.TestFailoverProxy

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * UnreliableException must not cause a failover, while StandbyException
 * must fail over to the second implementation.
 */
@Test public void testFailoverOnStandbyException() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),RetryPolicies.failoverOnNetworkException(1));
  assertEquals("impl1",proxy.succeedsOnceThenFailsReturningString());
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded twice");
  } catch ( UnreliableException e) {
    // No failover happened: the failure still came from impl1.
    assertEquals("impl1",e.getMessage());
  }
  // With standby semantics the second call fails over to impl2 and succeeds.
  proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(TypeOfExceptionToFailWith.STANDBY_EXCEPTION,TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),RetryPolicies.failoverOnNetworkException(1));
  assertEquals("impl1",proxy.succeedsOnceThenFailsReturningString());
  assertEquals("impl2",proxy.succeedsOnceThenFailsReturningString());
}

IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * The proxy stays on impl1 for ten successful calls; the first failure then
 * triggers a single failover to impl2.
 */
@Test public void testSucceedsTenTimesThenFailOver() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),new FailOverOnceOnAnyExceptionPolicy());
  for (int call=0; call < 10; call++) {
    assertEquals("impl1",proxy.succeedsTenTimesThenFailsReturningString());
  }
  // Call 11 fails on impl1 and is transparently retried against impl2.
  assertEquals("impl2",proxy.succeedsTenTimesThenFailsReturningString());
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * The fail-over-once policy allows exactly one failover: impl1 succeeds
 * once, the failover hits impl2 once, and the third call fails outright.
 */
@Test public void testSuccedsOnceThenFailOver() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),new FailOverOnceOnAnyExceptionPolicy());
  assertEquals("impl1",proxy.succeedsOnceThenFailsReturningString());
  assertEquals("impl2",proxy.succeedsOnceThenFailsReturningString());
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded more than twice");
  } catch ( UnreliableException expected) {
    // no further failover is permitted
  }
}

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A network (IO) failure on a non-idempotent method must surface to the
 * caller, while the same failure on an idempotent method is retried on the
 * other implementation.
 */
@Test public void testFailoverOnNetworkExceptionIdempotentOperation() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(TypeOfExceptionToFailWith.IO_EXCEPTION,TypeOfExceptionToFailWith.UNRELIABLE_EXCEPTION),RetryPolicies.failoverOnNetworkException(1));
  assertEquals("impl1",proxy.succeedsOnceThenFailsReturningString());
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded twice");
  } catch ( IOException e) {
    // Non-idempotent call: failure is propagated rather than retried.
    assertEquals("impl1",e.getMessage());
  }
  // Idempotent variant: the failure on impl1 is retried against impl2.
  assertEquals("impl1",proxy.succeedsOnceThenFailsReturningStringIdempotent());
  assertEquals("impl2",proxy.succeedsOnceThenFailsReturningStringIdempotent());
}

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Under TRY_ONCE_THEN_FAIL no failover happens: the second call fails and
 * the failure still originates from impl1.
 */
@Test public void testNeverFailOver() throws UnreliableException, IOException, StandbyException {
  UnreliableInterface proxy=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,newFlipFlopProxyProvider(),RetryPolicies.TRY_ONCE_THEN_FAIL);
  proxy.succeedsOnceThenFailsReturningString();
  try {
    proxy.succeedsOnceThenFailsReturningString();
    fail("should not have succeeded twice");
  } catch ( UnreliableException e) {
    // Still impl1 — the policy never switched implementations.
    assertEquals("impl1",e.getMessage());
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test that concurrent failed method invocations only result in a single
 * failover.
 *
 * Two threads invoke through the same retry proxy; impl1 throws
 * StandbyException for both, and the proxy provider must record exactly one
 * failover despite the two concurrent failures.
 */
@Test public void testConcurrentMethodFailures() throws InterruptedException {
  // impl1 is synchronized so both threads fail against it before either
  // can trigger the failover.
  FlipFlopProxyProvider proxyProvider=new FlipFlopProxyProvider(UnreliableInterface.class,new SynchronizedUnreliableImplementation("impl1",TypeOfExceptionToFailWith.STANDBY_EXCEPTION,2),new UnreliableImplementation("impl2",TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
  final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,proxyProvider,RetryPolicies.failoverOnNetworkException(10));
  ConcurrentMethodThread t1=new ConcurrentMethodThread(unreliable);
  ConcurrentMethodThread t2=new ConcurrentMethodThread(unreliable);
  t1.start();
  t2.start();
  t1.join();
  t2.join();
  // Both threads ended up served by impl2, via exactly one failover.
  assertEquals("impl2",t1.result);
  assertEquals("impl2",t2.result);
  assertEquals(1,proxyProvider.getFailoversOccurred());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Ensure that when all configured services are throwing StandbyException
 * that we fail over back and forth between them until one is no longer
 * throwing StandbyException.
 *
 * A background thread renames impl1 after a delay so that the identifier
 * check eventually succeeds; until then the proxy ping-pongs between the
 * two permanently-standby implementations.
 */
@Test public void testFailoverBetweenMultipleStandbys() throws UnreliableException, StandbyException, IOException {
  final long millisToSleep=10000;
  final UnreliableImplementation impl1=new UnreliableImplementation("impl1",TypeOfExceptionToFailWith.STANDBY_EXCEPTION);
  FlipFlopProxyProvider proxyProvider=new FlipFlopProxyProvider(UnreliableInterface.class,impl1,new UnreliableImplementation("impl2",TypeOfExceptionToFailWith.STANDBY_EXCEPTION));
  // Generous failover/retry budget so the proxy keeps alternating until
  // the rename below takes effect.
  final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,proxyProvider,RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,10,1000,10000));
  new Thread(){
    @Override public void run(){
      // After the delay, impl1 stops being "standby" for this identifier.
      ThreadUtil.sleepAtLeastIgnoreInterrupts(millisToSleep);
      impl1.setIdentifier("renamed-impl1");
    }
  }
  .start();
  String result=unreliable.failsIfIdentifierDoesntMatch("renamed-impl1");
  assertEquals("renamed-impl1",result);
}

Class: org.apache.hadoop.io.retry.TestRetryProxy

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}: a retry
 * proxy is an RPC invocation, a ProtocolTranslator wrapping one is too
 * (and must be unwrapped exactly once), and a plain Object is not.
 *
 * Fix: the assertEquals on the unwrap counter had its arguments in
 * (actual, expected) order; JUnit expects (expected, actual), which matters
 * for the failure message.
 */
@Test public void testRpcInvocation() throws Exception {
  final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,unreliableImpl,RETRY_FOREVER);
  assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
  // Translator that counts how many times it is asked for the underlying
  // proxy; toString() exposes the count.
  ProtocolTranslator xlator=new ProtocolTranslator(){
    int count=0;
    @Override public Object getUnderlyingProxyObject(){
      count++;
      return unreliable;
    }
    @Override public String toString(){
      return "" + count;
    }
  };
  assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
  // The underlying proxy must have been fetched exactly once.
  assertEquals("1",xlator.toString());
  assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A thread blocked in the retry policy's sleep must be interruptible:
 * interrupting it should surface an InterruptedException ("sleep
 * interrupted") through the proxy rather than hanging the retry loop.
 */
@Test public void testRetryInterruptible() throws Throwable {
  final UnreliableInterface unreliable=(UnreliableInterface)RetryProxy.create(UnreliableInterface.class,unreliableImpl,retryUpToMaximumTimeWithFixedSleep(10,10,TimeUnit.SECONDS));
  final CountDownLatch latch=new CountDownLatch(1);
  final AtomicReference futureThread=new AtomicReference();
  ExecutorService exec=Executors.newSingleThreadExecutor();
  Future future=exec.submit(new Callable(){
    @Override public Throwable call() throws Exception {
      // Publish this thread so the test can interrupt it, then signal
      // that the retried call is about to start.
      futureThread.set(Thread.currentThread());
      latch.countDown();
      try {
        unreliable.alwaysFailsWithFatalException();
      } catch ( UndeclaredThrowableException ute) {
        // Unwrap to the root cause thrown from inside the retry sleep.
        return ute.getCause();
      }
      return null;
    }
  }
  );
  latch.await();
  // Give the worker time to enter the retry sleep before interrupting.
  Thread.sleep(1000);
  assertTrue(futureThread.get().isAlive());
  futureThread.get().interrupt();
  Throwable e=future.get(1,TimeUnit.SECONDS);
  assertNotNull(e);
  assertEquals(InterruptedException.class,e.getClass());
  assertEquals("sleep interrupted",e.getMessage());
}

Class: org.apache.hadoop.io.serializer.TestWritableSerialization

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * A Configurable Writable must survive a serialization round trip with its
 * configuration re-attached on the deserialized copy.
 */
@Test public void testWritableConfigurable() throws Exception {
  conf.set(CONF_TEST_KEY,CONF_TEST_VALUE);
  FooGenericWritable generic=new FooGenericWritable();
  generic.setConf(conf);
  Baz original=new Baz();
  generic.set(original);
  Baz roundTripped=SerializationTestUtil.testSerialization(conf,original);
  assertEquals(original,roundTripped);
  // The deserializer must have injected a configuration into the copy.
  assertNotNull(roundTripped.getConf());
}

InternalCallVerifier EqualityVerifier 
/**
 * A WritableComparator subclass must round-trip through plain Java
 * serialization and compare equal to the original.
 */
@Test @SuppressWarnings({"rawtypes","unchecked"}) public void testWritableComparatorJavaSerialization() throws Exception {
  Serialization ser=new JavaSerialization();
  // Serialize into an in-memory buffer.
  Serializer serializer=ser.getSerializer(TestWC.class);
  DataOutputBuffer outBuf=new DataOutputBuffer();
  serializer.open(outBuf);
  TestWC original=new TestWC(0);
  serializer.serialize(original);
  serializer.close();
  // Deserialize from the same bytes.
  Deserializer deserializer=ser.getDeserializer(TestWC.class);
  DataInputBuffer inBuf=new DataInputBuffer();
  inBuf.reset(outBuf.getData(),0,outBuf.getLength());
  deserializer.open(inBuf);
  TestWC copy=deserializer.deserialize(null);
  deserializer.close();
  assertEquals(original,copy);
}

APIUtilityVerifier EqualityVerifier 
/**
 * A Text value must round-trip unchanged through Writable serialization.
 */
@Test public void testWritableSerialization() throws Exception {
  Text original=new Text("test writable");
  Text roundTripped=SerializationTestUtil.testSerialization(conf,original);
  assertEquals(original,roundTripped);
}

Class: org.apache.hadoop.ipc.TestCallQueueManager

InternalCallVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Swaps the underlying call queue repeatedly while 50 producers and 20
 * consumers hammer it, then verifies that no calls were lost: the queue
 * drains to zero and total calls consumed equals total calls created.
 */
@Test(timeout=60000) public void testSwapUnderContention() throws InterruptedException {
  manager=new CallQueueManager(queueClass,5000,"",null);
  ArrayList producers=new ArrayList();
  ArrayList consumers=new ArrayList();
  HashMap threads=new HashMap();
  // Start 50 producers and 20 consumers, all running until stopped.
  for (int i=0; i < 50; i++) {
    Putter p=new Putter(manager,-1,-1);
    Thread pt=new Thread(p);
    producers.add(p);
    threads.put(p,pt);
    pt.start();
  }
  for (int i=0; i < 20; i++) {
    Taker t=new Taker(manager,-1,-1);
    Thread tt=new Thread(t);
    consumers.add(t);
    threads.put(t,tt);
    tt.start();
  }
  Thread.sleep(10);
  // Swap the queue implementation several times mid-flight.
  for (int i=0; i < 5; i++) {
    manager.swapQueue(queueClass,5000,"",null);
  }
  for ( Putter p : producers) {
    p.stop();
  }
  // Give consumers time to drain whatever the producers enqueued.
  Thread.sleep(2000);
  assertEquals(0,manager.size());
  long totalCallsCreated=0;
  for ( Putter p : producers) {
    threads.get(p).interrupt();
  }
  for ( Putter p : producers) {
    threads.get(p).join();
    totalCallsCreated+=p.callsAdded;
  }
  long totalCallsConsumed=0;
  for ( Taker t : consumers) {
    threads.get(t).interrupt();
  }
  for ( Taker t : consumers) {
    threads.get(t).join();
    totalCallsConsumed+=t.callsTaken;
  }
  // No call may be dropped or duplicated across the swaps.
  assertEquals(totalCallsConsumed,totalCallsCreated);
}

Class: org.apache.hadoop.ipc.TestDecayRpcScheduler

InternalCallVerifier EqualityVerifier 
/**
 * The scheduler uses the default decay period when none is configured and
 * honors a namespaced override otherwise.
 */
@Test public void testParsePeriod(){
  // No configuration: the compiled-in default applies.
  scheduler=new DecayRpcScheduler(1,"",new Configuration());
  assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT,scheduler.getDecayPeriodMillis());
  // Namespaced override: the configured value wins.
  Configuration namespaced=new Configuration();
  namespaced.setLong("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,1058);
  scheduler=new DecayRpcScheduler(1,"ns",namespaced);
  assertEquals(1058L,scheduler.getDecayPeriodMillis());
}

InternalCallVerifier EqualityVerifier 
/**
 * Each forced decay halves every caller's count (factor 0.5); counts that
 * reach zero are dropped from the snapshot entirely.
 */
@Test public void testDecay(){
  Configuration decayConf=new Configuration();
  // Huge period so only forceDecay(), never the timer, decays the counts.
  decayConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,"999999999");
  decayConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,"0.5");
  scheduler=new DecayRpcScheduler(1,"ns",decayConf);
  assertEquals(0,scheduler.getTotalCallSnapshot());
  // Seed: A makes 4 calls, B makes 8.
  for (int call=0; call < 4; call++) {
    scheduler.getPriorityLevel(mockCall("A"));
  }
  for (int call=0; call < 8; call++) {
    scheduler.getPriorityLevel(mockCall("B"));
  }
  assertEquals(12,scheduler.getTotalCallSnapshot());
  assertEquals(4,scheduler.getCallCountSnapshot().get("A").longValue());
  assertEquals(8,scheduler.getCallCountSnapshot().get("B").longValue());
  // Decay 1: 12 -> 6, A: 4 -> 2, B: 8 -> 4.
  scheduler.forceDecay();
  assertEquals(6,scheduler.getTotalCallSnapshot());
  assertEquals(2,scheduler.getCallCountSnapshot().get("A").longValue());
  assertEquals(4,scheduler.getCallCountSnapshot().get("B").longValue());
  // Decay 2: 6 -> 3, A: 2 -> 1, B: 4 -> 2.
  scheduler.forceDecay();
  assertEquals(3,scheduler.getTotalCallSnapshot());
  assertEquals(1,scheduler.getCallCountSnapshot().get("A").longValue());
  assertEquals(2,scheduler.getCallCountSnapshot().get("B").longValue());
  // Decay 3: A's count hits zero and is evicted from the snapshot.
  scheduler.forceDecay();
  assertEquals(1,scheduler.getTotalCallSnapshot());
  assertEquals(null,scheduler.getCallCountSnapshot().get("A"));
  assertEquals(1,scheduler.getCallCountSnapshot().get("B").longValue());
  // Decay 4: everything has decayed away.
  scheduler.forceDecay();
  assertEquals(0,scheduler.getTotalCallSnapshot());
  assertEquals(null,scheduler.getCallCountSnapshot().get("A"));
  assertEquals(null,scheduler.getCallCountSnapshot().get("B"));
}

InternalCallVerifier EqualityVerifier 
/**
 * With a 10ms decay period the background timer alone must eventually
 * decay all accumulated calls to zero.
 */
@Test(timeout=2000) public void testPeriodic() throws InterruptedException {
  Configuration periodicConf=new Configuration();
  periodicConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,"10");
  periodicConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,"0.5");
  scheduler=new DecayRpcScheduler(1,"ns",periodicConf);
  assertEquals(10,scheduler.getDecayPeriodMillis());
  assertEquals(0,scheduler.getTotalCallSnapshot());
  for (int call=0; call < 64; call++) {
    scheduler.getPriorityLevel(mockCall("A"));
  }
  // Poll until the periodic decay has drained the counts (test timeout
  // guards against a broken timer).
  while (scheduler.getTotalCallSnapshot() > 0) {
    Thread.sleep(10);
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * With decay effectively disabled, per-caller counts accumulate across
 * calls and reading a snapshot does not perturb them.
 */
@Test public void testAccumulate(){
  Configuration accumConf=new Configuration();
  // Effectively never decay during this test.
  accumConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,"99999999");
  scheduler=new DecayRpcScheduler(1,"ns",accumConf);
  assertEquals(0,scheduler.getCallCountSnapshot().size());
  scheduler.getPriorityLevel(mockCall("A"));
  assertEquals(1,scheduler.getCallCountSnapshot().get("A").longValue());
  // Taking a snapshot twice must not change the count.
  assertEquals(1,scheduler.getCallCountSnapshot().get("A").longValue());
  scheduler.getPriorityLevel(mockCall("A"));
  scheduler.getPriorityLevel(mockCall("B"));
  scheduler.getPriorityLevel(mockCall("A"));
  assertEquals(3,scheduler.getCallCountSnapshot().get("A").longValue());
  assertEquals(1,scheduler.getCallCountSnapshot().get("B").longValue());
}

InternalCallVerifier EqualityVerifier 
/**
 * With thresholds 25/50/75 a caller's priority level rises as its share of
 * total traffic grows, and falls again as other callers dilute that share.
 */
@Test public void testPriority(){
  Configuration priorityConf=new Configuration();
  priorityConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,"99999999");
  priorityConf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY,"25, 50, 75");
  scheduler=new DecayRpcScheduler(4,"ns",priorityConf);
  // Each assertion reflects the caller's traffic share at that moment.
  assertEquals(0,scheduler.getPriorityLevel(mockCall("A")));
  assertEquals(2,scheduler.getPriorityLevel(mockCall("A")));
  assertEquals(0,scheduler.getPriorityLevel(mockCall("B")));
  assertEquals(1,scheduler.getPriorityLevel(mockCall("B")));
  assertEquals(0,scheduler.getPriorityLevel(mockCall("C")));
  assertEquals(0,scheduler.getPriorityLevel(mockCall("C")));
  assertEquals(1,scheduler.getPriorityLevel(mockCall("A")));
  assertEquals(1,scheduler.getPriorityLevel(mockCall("A")));
  assertEquals(1,scheduler.getPriorityLevel(mockCall("A")));
  assertEquals(2,scheduler.getPriorityLevel(mockCall("A")));
}

InternalCallVerifier EqualityVerifier 
/**
 * The scheduler uses the default decay factor when none is configured and
 * honors a namespaced override otherwise.
 */
@Test public void testParseFactor(){
  // No configuration: the compiled-in default applies.
  scheduler=new DecayRpcScheduler(1,"",new Configuration());
  assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT,scheduler.getDecayFactor(),0.00001);
  // Namespaced override: the configured factor wins.
  Configuration factorConf=new Configuration();
  factorConf.set("prefix." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,"0.125");
  scheduler=new DecayRpcScheduler(1,"prefix",factorConf);
  assertEquals(0.125,scheduler.getDecayFactor(),0.00001);
}

Class: org.apache.hadoop.ipc.TestIPC

EqualityVerifier 
/**
 * Test the retry count while used in a retry proxy: the server must see a
 * retry count that increments on every retried invocation, and after
 * RETRY_FOREVER drives the call to success the handler must have been
 * invoked totalRetry + 1 times.
 *
 * Fix: the final assertEquals had its arguments in (actual, expected)
 * order; JUnit expects (expected, actual), which matters for the failure
 * message.
 */
@Test(timeout=60000) public void testRetryProxy() throws IOException {
  final Client client=new Client(LongWritable.class,conf);
  final TestServer server=new TestServer(1,false);
  // Server-side listener: each retried invocation must carry the next
  // sequential retry count.
  server.callListener=new Runnable(){
    private int retryCount=0;
    @Override public void run(){
      Assert.assertEquals(retryCount++,Server.getCallRetryCount());
    }
  };
  final int totalRetry=10000;
  DummyProtocol proxy=(DummyProtocol)Proxy.newProxyInstance(DummyProtocol.class.getClassLoader(),new Class[]{DummyProtocol.class},new TestInvocationHandler(client,server,totalRetry));
  DummyProtocol retryProxy=(DummyProtocol)RetryProxy.create(DummyProtocol.class,proxy,RetryPolicies.RETRY_FOREVER);
  try {
    server.start();
    retryProxy.dummyRun();
    // totalRetry failures plus the final success.
    Assert.assertEquals(totalRetry + 1,TestInvocationHandler.retry);
  } finally {
    Client.setCallIdAndRetryCount(0,0);
    client.stop();
    server.stop();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the rpc server gets the retry count from client.
 *
 * Fix: the retry count 255 was duplicated as a literal in the
 * setCallIdAndRetryCount call instead of reusing the {@code retryCount}
 * constant the server-side assertion checks against.
 */
@Test(timeout=60000) public void testCallRetryCount() throws IOException {
  final int retryCount=255;
  final Client client=new Client(LongWritable.class,conf);
  Client.setCallIdAndRetryCount(Client.nextCallId(),retryCount);
  final TestServer server=new TestServer(1,false);
  // The server must observe exactly the retry count the client set.
  server.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(retryCount,Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller=new SerialCaller(client,addr,10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}

IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies the server's idle-connection reaper: with killMax=3 and an idle
 * scan interval, idle client connections are culled in batches of killMax
 * per scan while an in-flight call keeps its own connection alive, until
 * finally every connection is closed.
 *
 * Fix: in the original finally block, {@code server.stop()} sat inside the
 * per-thread cleanup loop and so was invoked once per client thread; it is
 * now called exactly once, after all threads are interrupted and joined.
 */
@Test(timeout=30000) public void testConnectionIdleTimeouts() throws Exception {
  ((Log4JLogger)Server.LOG).getLogger().setLevel(Level.DEBUG);
  final int maxIdle=1000;
  final int cleanupInterval=maxIdle * 3 / 4;
  final int killMax=3;
  // Enough clients for two full kill batches plus the one kept busy.
  final int clients=1 + killMax * 2;
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,maxIdle);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_IDLETHRESHOLD_KEY,0);
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_KILL_MAX_KEY,killMax);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECTION_IDLESCANINTERVAL_KEY,cleanupInterval);
  final CyclicBarrier firstCallBarrier=new CyclicBarrier(2);
  final CyclicBarrier callBarrier=new CyclicBarrier(clients);
  final CountDownLatch allCallLatch=new CountDownLatch(clients);
  final AtomicBoolean error=new AtomicBoolean();
  final TestServer server=new TestServer(clients,false);
  Thread[] threads=new Thread[clients];
  try {
    // The first call parks on its own barrier (staying "busy"); all the
    // others rendezvous together and then return.
    server.callListener=new Runnable(){
      AtomicBoolean first=new AtomicBoolean(true);
      @Override public void run(){
        try {
          allCallLatch.countDown();
          if (first.compareAndSet(true,false)) {
            firstCallBarrier.await();
          } else {
            callBarrier.await();
          }
        } catch ( Throwable t) {
          LOG.error(t);
          error.set(true);
        }
      }
    };
    server.start();
    final CountDownLatch callReturned=new CountDownLatch(clients - 1);
    final InetSocketAddress addr=NetUtils.getConnectAddress(server);
    // Clients use a long max-idle so only the SERVER closes connections.
    final Configuration clientConf=new Configuration();
    clientConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,10000);
    for (int i=0; i < clients; i++) {
      threads[i]=new Thread(new Runnable(){
        @Override public void run(){
          Client client=new Client(LongWritable.class,clientConf);
          try {
            client.call(new LongWritable(Thread.currentThread().getId()),addr,null,null,0,clientConf);
            callReturned.countDown();
            Thread.sleep(10000);
          } catch ( IOException e) {
            LOG.error(e);
          } catch ( InterruptedException e) {
          }
        }
      });
      threads[i].start();
    }
    allCallLatch.await();
    assertFalse(error.get());
    assertEquals(clients,server.getNumOpenConnections());
    // Release all but the first call; their connections go idle.
    callBarrier.await();
    callReturned.await();
    assertEquals(clients,server.getNumOpenConnections());
    // Let the idle clock run, then watch the reaper cull killMax per scan.
    Thread.sleep(maxIdle * 2 - cleanupInterval);
    for (int i=clients; i > 1; i-=killMax) {
      Thread.sleep(cleanupInterval);
      assertFalse(error.get());
      assertEquals(i,server.getNumOpenConnections());
    }
    Thread.sleep(cleanupInterval);
    assertFalse(error.get());
    // Only the still-busy first call's connection survives.
    assertEquals(1,server.getNumOpenConnections());
    // Release the first call; its connection then idles out too.
    firstCallBarrier.await();
    Thread.sleep(maxIdle * 2);
    assertFalse(error.get());
    assertEquals(0,server.getNumOpenConnections());
  } finally {
    for ( Thread t : threads) {
      if (t != null) {
        t.interrupt();
        t.join();
      }
    }
    // Stop the server exactly once, after all client threads are done.
    server.stop();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000) public void testCallIdAndRetry() throws IOException {
  final CallInfo info=new CallInfo();
  // Client subclass that records the id/retry of every call it creates and
  // checks the response header echoes them back.
  final Client client=new Client(LongWritable.class,conf){
    @Override Call createCall( RpcKind rpcKind, Writable rpcRequest){
      final Call call=super.createCall(rpcKind,rpcRequest);
      info.id=call.id;
      info.retry=call.retry;
      return call;
    }
    @Override void checkResponse( RpcResponseHeaderProto header) throws IOException {
      super.checkResponse(header);
      Assert.assertEquals(info.id,header.getCallId());
      Assert.assertEquals(info.retry,header.getRetryCount());
    }
  };
  final TestServer server=new TestServer(1,false);
  // Server side must observe exactly the id/retry the client sent.
  server.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(info.id,Server.getCallId());
      Assert.assertEquals(info.retry,Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller=new SerialCaller(client,addr,10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Test if the rpc server gets the default retry count (0) from a client
 * that never set one explicitly.
 */
@Test(timeout=60000) public void testInitialCallRetryCount() throws IOException {
  final Client client=new Client(LongWritable.class,conf);
  final TestServer server=new TestServer(1,false);
  // Without an explicit setCallIdAndRetryCount, the server sees retry 0.
  server.callListener=new Runnable(){
    @Override public void run(){
      Assert.assertEquals(0,Server.getCallRetryCount());
    }
  };
  try {
    InetSocketAddress serverAddr=NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller=new SerialCaller(client,serverAddr,10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}

APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests that client generates a unique sequential call ID for each RPC call,
 * even if multiple threads are using the same client.
 * @throws InterruptedException
 */
@Test(timeout=60000) public void testUniqueSequentialCallIds() throws IOException, InterruptedException {
  int serverThreads=10, callerCount=100, perCallerCallCount=100;
  TestServer server=new TestServer(serverThreads,false);
  // Collect every call ID the server observes; synchronized because server
  // handler threads append concurrently.
  final List callIds=Collections.synchronizedList(new ArrayList());
  server.callListener=new Runnable(){
    @Override public void run(){
      callIds.add(Server.getCallId());
    }
  };
  Client client=new Client(LongWritable.class,conf);
  try {
    InetSocketAddress addr=NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller[] callers=new SerialCaller[callerCount];
    for (int i=0; i < callerCount; ++i) {
      callers[i]=new SerialCaller(client,addr,perCallerCallCount);
      callers[i].start();
    }
    for (int i=0; i < callerCount; ++i) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
  } finally {
    client.stop();
    server.stop();
  }
  // IDs may arrive out of order across threads, but once sorted they must
  // form a gap-free sequence starting at the smallest observed ID.
  int expectedCallCount=callerCount * perCallerCallCount;
  assertEquals(expectedCallCount,callIds.size());
  Collections.sort(callIds);
  final int startID=callIds.get(0).intValue();
  for (int i=0; i < expectedCallCount; ++i) {
    assertEquals(startID + i,callIds.get(i).intValue());
  }
}

Class: org.apache.hadoop.ipc.TestIdentityProviders

InternalCallVerifier EqualityVerifier 
/**
 * UserIdentityProvider must derive a schedulable's identity from the
 * current user's name.
 */
@Test public void testUserIdentityProvider() throws IOException {
  UserIdentityProvider provider=new UserIdentityProvider();
  String identity=provider.makeIdentity(new FakeSchedulable());
  // The identity must match the UGI's notion of the current user.
  UserGroupInformation currentUser=UserGroupInformation.getCurrentUser();
  assertEquals(currentUser.getUserName(),identity);
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * An identity provider named in configuration must be instantiated via
 * getInstances and be of the configured class.
 *
 * Fixes: assertTrue(size() == 1) gave no diagnostics on failure and is now
 * assertEquals(1, size()); the class assertion had its (expected, actual)
 * arguments swapped.
 */
@Test public void testPluggableIdentityProvider(){
  Configuration conf=new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,"org.apache.hadoop.ipc.UserIdentityProvider");
  List providers=conf.getInstances(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,IdentityProvider.class);
  // Exactly one provider was configured, so exactly one must come back.
  assertEquals(1,providers.size());
  IdentityProvider ip=providers.get(0);
  assertNotNull(ip);
  assertEquals(UserIdentityProvider.class,ip.getClass());
}

Class: org.apache.hadoop.ipc.TestMultipleProtocolServer

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Pings each protocol exposed by the multi-protocol server and checks that
 * every proxy is routed to the matching implementation.
 */
@Test public void test1() throws IOException {
  ProtocolProxy handle;
  handle=RPC.getProtocolProxy(Foo0.class,Foo0.versionID,addr,conf);
  Foo0 foo0=(Foo0)handle.getProxy();
  Assert.assertEquals("Foo0",foo0.ping());
  handle=RPC.getProtocolProxy(Foo1.class,Foo1.versionID,addr,conf);
  Foo1 foo1=(Foo1)handle.getProxy();
  // Ping twice to confirm the proxy is reusable.
  Assert.assertEquals("Foo1",foo1.ping());
  Assert.assertEquals("Foo1",foo1.ping());
  handle=RPC.getProtocolProxy(Bar.class,Foo1.versionID,addr,conf);
  Bar bar=(Bar)handle.getProxy();
  Assert.assertEquals(99,bar.echo(99));
  // Bar extends Mixin, so the same proxy serves the Mixin method too.
  Mixin mixin=bar;
  mixin.hello();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * getProtocolVersion of an unimplemented version should return highest version
 * Similarly getProtocolSignature should work.
 * @throws IOException
 */
@Test public void testNonExistingProtocol2() throws IOException {
  ProtocolProxy handle=RPC.getProtocolProxy(FooUnimplemented.class,FooUnimplemented.versionID,addr,conf);
  FooUnimplemented unimplemented=(FooUnimplemented)handle.getProxy();
  // The server doesn't implement this version, so the highest available
  // version (Foo1's) is reported instead.
  Assert.assertEquals(Foo1.versionID,unimplemented.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),FooUnimplemented.versionID));
  // Must complete without throwing even for the unimplemented version.
  unimplemented.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),FooUnimplemented.versionID,0);
}

Class: org.apache.hadoop.ipc.TestProtoBufRpc

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * A short echo must succeed while an over-length payload must be rejected
 * by the server with a ServiceException.
 */
@Test(timeout=6000) public void testExtraLongRpc() throws Exception {
  TestRpcService2 client=getClient2();
  // Well under the limit: must round-trip intact.
  final String shortString=StringUtils.repeat("X",4);
  EchoRequestProto shortRequest=EchoRequestProto.newBuilder().setMessage(shortString).build();
  Assert.assertEquals(shortString,client.echo2(null,shortRequest).getMessage());
  // Over the limit: the call must fail.
  final String longString=StringUtils.repeat("X",4096);
  EchoRequestProto longRequest=EchoRequestProto.newBuilder().setMessage(longString).build();
  try {
    client.echo2(null,longRequest);
    Assert.fail("expected extra-long RPC to fail");
  } catch ( ServiceException expected) {
    // rejected as intended
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Exercises the second protobuf service: ping, echo round trip, and the
 * per-server / per-method RPC metrics counters.
 *
 * Fix: the echo assertEquals had its arguments in (actual, expected) order;
 * JUnit expects (expected, actual), which matters for the failure message.
 */
@Test(timeout=5000) public void testProtoBufRpc2() throws Exception {
  TestRpcService2 client=getClient2();
  // Simple ping with an empty request.
  EmptyRequestProto emptyRequest=EmptyRequestProto.newBuilder().build();
  client.ping2(null,emptyRequest);
  // Echo round trip.
  EchoRequestProto echoRequest=EchoRequestProto.newBuilder().setMessage("hello").build();
  EchoResponseProto echoResponse=client.echo2(null,echoRequest);
  Assert.assertEquals("hello",echoResponse.getMessage());
  // The calls above must have bumped the aggregate and per-method counters.
  MetricsRecordBuilder rpcMetrics=getMetrics(server.getRpcMetrics().name());
  assertCounterGt("RpcQueueTimeNumOps",0L,rpcMetrics);
  assertCounterGt("RpcProcessingTimeNumOps",0L,rpcMetrics);
  MetricsRecordBuilder rpcDetailedMetrics=getMetrics(server.getRpcDetailedMetrics().name());
  assertCounterGt("Echo2NumOps",0L,rpcDetailedMetrics);
}

Class: org.apache.hadoop.ipc.TestRPC

APIUtilityVerifier EqualityVerifier 
/**
 * A server bound to the default address must report the local host as its
 * connect address.
 */
@Test public void testServerAddress() throws IOException {
  Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build();
  InetSocketAddress connectAddr=null;
  try {
    connectAddr=NetUtils.getConnectAddress(server);
  } finally {
    // Stop the server before asserting so it never outlives the test.
    server.stop();
  }
  assertEquals(InetAddress.getLocalHost(),connectAddr.getAddress());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * RPC.stopProxy must invoke close() on the proxy's invocation handler
 * exactly once.
 */
@Test public void testStopProxy() throws IOException {
  StoppedProtocol proxy=RPC.getProxy(StoppedProtocol.class,StoppedProtocol.versionID,null,conf);
  StoppedInvocationHandler handler=(StoppedInvocationHandler)Proxy.getInvocationHandler(proxy);
  // Not closed yet.
  assertEquals(0,handler.getCloseCalled());
  RPC.stopProxy(proxy);
  // Exactly one close after stopping.
  assertEquals(1,handler.getCloseCalled());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * RPC.getServerAddress on a live proxy must report the address the proxy
 * actually connected to.
 */
@Test public void testProxyAddress() throws IOException {
  Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
  TestProtocol proxy=null;
  try {
    server.start();
    InetSocketAddress serverAddr=NetUtils.getConnectAddress(server);
    proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,serverAddr,conf);
    assertEquals(serverAddr,RPC.getServerAddress(proxy));
  } finally {
    // Tear down both ends regardless of the assertion's outcome.
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * stopProxy on a retry-wrapped proxy must unwrap it and close the
 * underlying invocation handler exactly once.
 */
@Test public void testWrappedStopProxy() throws IOException {
  StoppedProtocol innerProxy=RPC.getProxy(StoppedProtocol.class,StoppedProtocol.versionID,null,conf);
  StoppedInvocationHandler innerHandler=(StoppedInvocationHandler)Proxy.getInvocationHandler(innerProxy);
  // Wrap the raw proxy in a retry proxy.
  StoppedProtocol wrapped=(StoppedProtocol)RetryProxy.create(StoppedProtocol.class,innerProxy,RetryPolicies.RETRY_FOREVER);
  assertEquals(0,innerHandler.getCloseCalled());
  RPC.stopProxy(wrapped);
  // Stopping the wrapper must reach through to the inner handler.
  assertEquals(1,innerHandler.getCloseCalled());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Verifies that a Server picks up its handler-queue size and reader-thread count
// from the configuration (IPC_SERVER_HANDLER_QUEUE_SIZE / IPC_SERVER_RPC_READ_THREADS),
// and that explicit Builder overrides (setnumReaders(3), setQueueSizePerHandler(200))
// take precedence over the configured values on a second server instance.
@Test public void testConfRpc() throws IOException { Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(1).setVerbose(false).build(); int confQ=conf.getInt(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY,CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_DEFAULT); assertEquals(confQ,server.getMaxQueueSize()); int confReaders=conf.getInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY,CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_DEFAULT); assertEquals(confReaders,server.getNumReaders()); server.stop(); server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(1).setnumReaders(3).setQueueSizePerHandler(200).setVerbose(false).build(); assertEquals(3,server.getNumReaders()); assertEquals(200,server.getMaxQueueSize()); server.stop(); }

BooleanVerifier EqualityVerifier HybridVerifier 
// Counts "Server$Listener$Reader" threads before start (must be 0), then polls
// every 10 ms for up to 5 s for at least one reader thread to appear after
// server.start(), and finally asserts stop() leaves no reader threads behind.
/** * Test that server.stop() properly stops all threads */ @Test public void testStopsAllThreads() throws IOException, InterruptedException { int threadsBefore=countThreads("Server$Listener$Reader"); assertEquals("Expect no Reader threads running before test",0,threadsBefore); final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).build(); server.start(); try { int threadsRunning=0; long totalSleepTime=0; do { totalSleepTime+=10; Thread.sleep(10); threadsRunning=countThreads("Server$Listener$Reader"); } while (threadsRunning == 0 && totalSleepTime < 5000); threadsRunning=countThreads("Server$Listener$Reader"); assertTrue(threadsRunning > 0); } finally { server.stop(); } int threadsAfter=countThreads("Server$Listener$Reader"); assertEquals("Expect no Reader threads left running after test",0,threadsAfter); }

Class: org.apache.hadoop.ipc.TestRPCCallBenchmark

EqualityVerifier 
/** End-to-end RPC benchmark over the protobuf engine must complete and exit with code 0. */
@Test(timeout=20000)
public void testBenchmarkWithProto() throws Exception {
  String[] benchmarkArgs = {
      "--clientThreads", "30",
      "--serverThreads", "30",
      "--time", "5",
      "--serverReaderThreads", "4",
      "--messageSize", "1024",
      "--engine", "protobuf"};
  int exitCode = ToolRunner.run(new RPCCallBenchmark(), benchmarkArgs);
  assertEquals(0, exitCode);
}

EqualityVerifier 
/** End-to-end RPC benchmark over the writable engine must complete and exit with code 0. */
@Test(timeout=20000)
public void testBenchmarkWithWritable() throws Exception {
  String[] benchmarkArgs = {
      "--clientThreads", "30",
      "--serverThreads", "30",
      "--time", "5",
      "--serverReaderThreads", "4",
      "--messageSize", "1024",
      "--engine", "writable"};
  int exitCode = ToolRunner.run(new RPCCallBenchmark(), benchmarkArgs);
  assertEquals(0, exitCode);
}

Class: org.apache.hadoop.ipc.TestRPCCompatibility

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// TestProtocol1 is registered only under RPC_WRITABLE here, so a lookup under
// RPC_PROTOCOL_BUFFER must return zero signatures while the RPC_WRITABLE lookup
// returns exactly one, carrying TestProtocol1.versionID and the fingerprint of
// its echo(String) method.
/** * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up * the server registry to extract protocol signatures and versions. */ @Test public void testProtocolMetaInfoSSTranslatorPB() throws Exception { TestImpl1 impl=new TestImpl1(); server=new RPC.Builder(conf).setProtocol(TestProtocol1.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl); server.start(); ProtocolMetaInfoServerSideTranslatorPB xlator=new ProtocolMetaInfoServerSideTranslatorPB(server); GetProtocolSignatureResponseProto resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_PROTOCOL_BUFFER)); Assert.assertEquals(0,resp.getProtocolSignatureCount()); resp=xlator.getProtocolSignature(null,createGetProtocolSigRequestProto(TestProtocol1.class,RPC.RpcKind.RPC_WRITABLE)); Assert.assertEquals(1,resp.getProtocolSignatureCount()); ProtocolSignatureProto sig=resp.getProtocolSignatureList().get(0); Assert.assertEquals(TestProtocol1.versionID,sig.getVersion()); boolean found=false; int expected=ProtocolSignature.getFingerprint(TestProtocol1.class.getMethod("echo",String.class)); for ( int m : sig.getMethodsList()) { if (expected == m) { found=true; break; } } Assert.assertTrue(found); }

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// ProtocolSignature fingerprints must: differ across parameter types
// (echo(String) vs echo(int)), match for identical signatures declared on
// different protocols, differ when only the method name changes (echo_alias)
// or the arity changes (echo(int,int)), and be order-independent when
// fingerprinting an array of methods.
@Test public void testHashCode() throws Exception { Method strMethod=TestProtocol3.class.getMethod("echo",String.class); int stringEchoHash=ProtocolSignature.getFingerprint(strMethod); Method intMethod=TestProtocol3.class.getMethod("echo",int.class); int intEchoHash=ProtocolSignature.getFingerprint(intMethod); assertFalse(stringEchoHash == intEchoHash); int intEchoHash1=ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo",int.class)); assertEquals(intEchoHash,intEchoHash1); int stringEchoHash1=ProtocolSignature.getFingerprint(TestProtocol2.class.getMethod("echo",String.class)); assertFalse(stringEchoHash == stringEchoHash1); int intEchoHashAlias=ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo_alias",int.class)); assertFalse(intEchoHash == intEchoHashAlias); int intEchoHash2=ProtocolSignature.getFingerprint(TestProtocol3.class.getMethod("echo",int.class,int.class)); assertFalse(intEchoHash == intEchoHash2); int hash1=ProtocolSignature.getFingerprint(new Method[]{intMethod,strMethod}); int hash2=ProtocolSignature.getFingerprint(new Method[]{strMethod,intMethod}); assertEquals(hash1,hash2); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Calling a version-4 protocol method against a version-2 server must fail
 * with a {@code RPC.VersionMismatch} remote exception carrying the
 * {@code ERROR_RPC_VERSION_MISMATCH} error code.
 */
@Test
public void testVersionMismatch() throws IOException {
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr = NetUtils.getConnectAddress(server);

  TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
      TestProtocol4.versionID, addr, conf);
  try {
    proxy.echo(21);
    fail("The call must throw VersionMismatch exception");
  } catch (RemoteException ex) {
    // Server-side mismatch surfaces as a RemoteException naming VersionMismatch.
    Assert.assertEquals(RPC.VersionMismatch.class.getName(), ex.getClassName());
    Assert.assertTrue(ex.getErrorCode().equals(
        RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
  } catch (IOException ex) {
    fail("Expected version mismatch but got " + ex);
  } finally {
    // Fix: the proxy was previously never stopped, leaking its client
    // connection across tests; release it like testProxyAddress does.
    RPC.stopProxy(proxy);
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// A version-2 client talking to a version-1 server: ping and echo(String) work,
// and echo(3) returns 3 here — contrast with testVersion2ClientVersion2Server,
// where the v2 server implementation returns -3 for the same call.
@Test public void testVersion2ClientVersion1Server() throws Exception { TestImpl1 impl=new TestImpl1(); server=new RPC.Builder(conf).setProtocol(TestProtocol1.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl); server.start(); addr=NetUtils.getConnectAddress(server); Version2Client client=new Version2Client(); client.ping(); assertEquals("hello",client.echo("hello")); assertEquals(3,client.echo(3)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// A version-2 client talking to a version-2 server: ping and echo(String) work,
// and echo(3) returns -3 (the v2 implementation's behavior), distinguishing it
// from the v1 server in testVersion2ClientVersion1Server which echoes 3 back.
@Test public void testVersion2ClientVersion2Server() throws Exception { TestImpl2 impl=new TestImpl2(); server=new RPC.Builder(conf).setProtocol(TestProtocol2.class).setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false).build(); server.addProtocol(RPC.RpcKind.RPC_WRITABLE,TestProtocol0.class,impl); server.start(); addr=NetUtils.getConnectAddress(server); Version2Client client=new Version2Client(); client.ping(); assertEquals("hello",client.echo("hello")); assertEquals(-3,client.echo(3)); }

Class: org.apache.hadoop.ipc.TestSaslRPC

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Ping interval on a ConnectionId: with client pings enabled the configured
 * default interval must be reported; with pings disabled it must be 0.
 */
@Test
public void testPingInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
  // Fix: set the interval on the private copy, not on the shared 'conf'
  // field. The original 'conf.setInt(...)' leaked state into other tests
  // and had no effect on 'newConf', which was copied before the set.
  newConf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
      CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);

  // Pings enabled: the configured default interval is in effect.
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
  ConnectionId remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
      remoteId.getPingInterval());

  // Pings disabled: interval must be reported as 0.
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
  remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(0, remoteId.getPingInterval());
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// A BadTokenSecretManager must make the digest RPC fail with a RemoteException
// whose message is ERROR_MESSAGE and which unwraps to InvalidToken; the
// 'succeeded' flag ensures the catch block actually ran.
@Test public void testErrorMessage() throws Exception { BadTokenSecretManager sm=new BadTokenSecretManager(); final Server server=new RPC.Builder(conf).setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); boolean succeeded=false; try { doDigestRpc(server,sm); } catch ( RemoteException e) { LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage()); assertEquals(ERROR_MESSAGE,e.getLocalizedMessage()); assertTrue(e.unwrapRemoteException() instanceof InvalidToken); succeeded=true; } assertTrue(succeeded); }

NullVerifier EqualityVerifier HybridVerifier 
/** PLAIN SASL negotiation with mismatched passwords must fail with a wrong-password SaslException. */
@Test
public void testSaslPlainServerBadPassword() {
  SaslException failure = null;
  try {
    runNegotiation(new TestPlainCallbacks.Client("user", "pass1"),
                   new TestPlainCallbacks.Server("user", "pass2"));
  } catch (SaslException se) {
    failure = se;
  }
  assertNotNull(failure);
  assertEquals("PLAIN auth failed: wrong password", failure.getMessage());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
// Connections must be shared per ConnectionId: two proxies built with the same
// max-idle-time reuse one cached connection (cache size stays 1), while a proxy
// built after changing IPC_CLIENT_CONNECTION_MAXIDLETIME gets a distinct
// ConnectionId (cache grows to 2). connsArray[0]/[1] must be equal and differ
// from connsArray[2].
// NOTE(review): the final assertNotSame compares an autoboxed int against
// timeouts[1]; for values this large (3333333) boxing always yields distinct
// objects, so the assertion passes unconditionally — it likely was meant to be
// a value comparison. Confirm intent before changing.
@Test public void testPerConnectionConf() throws Exception { TestTokenSecretManager sm=new TestTokenSecretManager(); final Server server=new RPC.Builder(conf).setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); server.start(); final UserGroupInformation current=UserGroupInformation.getCurrentUser(); final InetSocketAddress addr=NetUtils.getConnectAddress(server); TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName())); Token token=new Token(tokenId,sm); SecurityUtil.setTokenService(token,addr); current.addToken(token); Configuration newConf=new Configuration(conf); newConf.set(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,""); Client client=null; TestSaslProtocol proxy1=null; TestSaslProtocol proxy2=null; TestSaslProtocol proxy3=null; int timeouts[]={111222,3333333}; try { newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[0]); proxy1=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf); proxy1.getAuthMethod(); client=WritableRpcEngine.getClient(newConf); Set conns=client.getConnectionIds(); assertEquals("number of connections in cache is wrong",1,conns.size()); proxy2=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf); proxy2.getAuthMethod(); assertEquals("number of connections in cache is wrong",1,conns.size()); newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,timeouts[1]); proxy3=RPC.getProxy(TestSaslProtocol.class,TestSaslProtocol.versionID,addr,newConf); proxy3.getAuthMethod(); assertEquals("number of connections in cache is wrong",2,conns.size()); ConnectionId[] connsArray={RPC.getConnectionIdForProxy(proxy1),RPC.getConnectionIdForProxy(proxy2),RPC.getConnectionIdForProxy(proxy3)}; assertEquals(connsArray[0],connsArray[1]); 
assertEquals(connsArray[0].getMaxIdleTime(),timeouts[0]); assertFalse(connsArray[0].equals(connsArray[2])); assertNotSame(connsArray[2].getMaxIdleTime(),timeouts[1]); } finally { server.stop(); if (client != null) { client.getConnectionIds().clear(); } if (proxy1 != null) RPC.stopProxy(proxy1); if (proxy2 != null) RPC.stopProxy(proxy2); if (proxy3 != null) RPC.stopProxy(proxy3); } }

Class: org.apache.hadoop.ipc.TestSocketFactory

APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
/**
 * Socket factories must work as HashMap keys: factories of different classes
 * hash/compare as distinct entries, and each key maps back to its own value.
 */
@Test
public void testSocketFactoryAsKeyInMap() {
  Map dummyCache = new HashMap();
  int toBeCached1 = 1;
  int toBeCached2 = 2;
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.ipc.TestSocketFactory$DummySocketFactory");
  final SocketFactory dummySocketFactory = NetUtils.getDefaultSocketFactory(conf);
  dummyCache.put(dummySocketFactory, toBeCached1);

  conf.set(CommonConfigurationKeys.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
      "org.apache.hadoop.net.StandardSocketFactory");
  final SocketFactory defaultSocketFactory = NetUtils.getDefaultSocketFactory(conf);
  dummyCache.put(defaultSocketFactory, toBeCached2);

  Assert.assertEquals("The cache contains two elements", 2, dummyCache.size());
  // Fix: assertFalse is the direct form; the original used
  // assertEquals(actual, false) with reversed expected/actual ordering.
  Assert.assertFalse("Equals of both socket factory shouldn't be same",
      defaultSocketFactory.equals(dummySocketFactory));
  // Fix: compare removed values by equality, not identity. The original
  // assertSame only passed because small autoboxed Integers (1, 2) come
  // from the shared Integer cache — a fragile, incidental guarantee.
  Assert.assertEquals(Integer.valueOf(toBeCached2),
      dummyCache.remove(defaultSocketFactory));
  dummyCache.put(defaultSocketFactory, toBeCached2);
  Assert.assertEquals(Integer.valueOf(toBeCached1),
      dummyCache.remove(dummySocketFactory));
}

Class: org.apache.hadoop.ipc.TestWeightedRoundRobinMultiplexer

IterativeVerifier InternalCallVerifier EqualityVerifier 
// Custom weights read from "<namespace>.<IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY>":
// weights {1,1} alternate 0,1,0,1; weights {1,3,2} yield the repeating cycle
// 0,1,1,1,2,2 — verified over five full cycles.
@Test public void testCustomPattern(){ Configuration conf=new Configuration(); conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,"1","1"); mux=new WeightedRoundRobinMultiplexer(2,"test.custom",conf); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY,"1","3","2"); mux=new WeightedRoundRobinMultiplexer(3,"test.custom",conf); for (int i=0; i < 5; i++) { assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),2); assertEquals(mux.getAndAdvanceCurrentIndex(),2); } }

IterativeVerifier InternalCallVerifier EqualityVerifier 
// With no configured weights, the multiplexer's default schedule is exercised
// for 1–4 queues: 1 queue always yields 0; 2 queues cycle 0,0,1; 3 queues cycle
// 0,0,0,0,1,1,2; 4 queues cycle 0 x8, 1 x4, 2 x2, 3 — i.e. each successive
// queue is visited half as often as the previous one.
@Test public void testDefaultPattern(){ mux=new WeightedRoundRobinMultiplexer(1,"",new Configuration()); for (int i=0; i < 10; i++) { assertEquals(mux.getAndAdvanceCurrentIndex(),0); } mux=new WeightedRoundRobinMultiplexer(2,"",new Configuration()); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); mux=new WeightedRoundRobinMultiplexer(3,"",new Configuration()); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),2); assertEquals(mux.getAndAdvanceCurrentIndex(),0); mux=new WeightedRoundRobinMultiplexer(4,"",new Configuration()); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),0); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),1); assertEquals(mux.getAndAdvanceCurrentIndex(),2); assertEquals(mux.getAndAdvanceCurrentIndex(),2); assertEquals(mux.getAndAdvanceCurrentIndex(),3); assertEquals(mux.getAndAdvanceCurrentIndex(),0); }

Class: org.apache.hadoop.lib.lang.TestRunnableCallable

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A RunnableCallable wrapping a Callable must execute it via both run() and call(), and toString() must report the wrapped type. */
@Test
public void callable() throws Exception {
  C wrapped = new C();
  RunnableCallable adapter = new RunnableCallable(wrapped);
  adapter.run();
  assertTrue(wrapped.RUN);

  // Fresh instance: call() must also trigger execution.
  wrapped = new C();
  adapter = new RunnableCallable(wrapped);
  adapter.call();
  assertTrue(wrapped.RUN);

  assertEquals(adapter.toString(), "C");
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** A RunnableCallable wrapping a Runnable must execute it via both run() and call(), and toString() must report the wrapped type. */
@Test
public void runnable() throws Exception {
  R wrapped = new R();
  RunnableCallable adapter = new RunnableCallable(wrapped);
  adapter.run();
  assertTrue(wrapped.RUN);

  // Fresh instance: call() must also trigger execution.
  wrapped = new R();
  adapter = new RunnableCallable(wrapped);
  adapter.call();
  assertTrue(wrapped.RUN);

  assertEquals(adapter.toString(), "R");
}

Class: org.apache.hadoop.lib.lang.TestXException

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exercises all four XException constructors: error-only (templated message,
// no cause), error+message, error+cause (cause's toString embedded in the
// message), and wrapping another XException (error, message and cause all
// taken from the wrapped exception).
@Test public void testXException() throws Exception { XException ex=new XException(TestERROR.TC); assertEquals(ex.getError(),TestERROR.TC); assertEquals(ex.getMessage(),"TC: {0}"); assertNull(ex.getCause()); ex=new XException(TestERROR.TC,"msg"); assertEquals(ex.getError(),TestERROR.TC); assertEquals(ex.getMessage(),"TC: msg"); assertNull(ex.getCause()); Exception cause=new Exception(); ex=new XException(TestERROR.TC,cause); assertEquals(ex.getError(),TestERROR.TC); assertEquals(ex.getMessage(),"TC: " + cause.toString()); assertEquals(ex.getCause(),cause); XException xcause=ex; ex=new XException(xcause); assertEquals(ex.getError(),TestERROR.TC); assertEquals(ex.getMessage(),xcause.getMessage()); assertEquals(ex.getCause(),xcause); }

Class: org.apache.hadoop.lib.server.TestBaseService

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// A BaseService built around a mocked Server must expose its prefix
// ("myservice"), resolve prefixed names through the server, and surface only
// the configuration keys under its own prefix ("foo" -> "FOO"; the
// "myservice1.bar" key must be excluded). MyService.INIT confirms init ran.
@Test public void baseService() throws Exception { BaseService service=new MyService(); assertNull(service.getInterface()); assertEquals(service.getPrefix(),"myservice"); assertEquals(service.getServiceDependencies().length,0); Server server=Mockito.mock(Server.class); Configuration conf=new Configuration(false); conf.set("server.myservice.foo","FOO"); conf.set("server.myservice1.bar","BAR"); Mockito.when(server.getConfig()).thenReturn(conf); Mockito.when(server.getPrefixedName("myservice.foo")).thenReturn("server.myservice.foo"); Mockito.when(server.getPrefixedName("myservice.")).thenReturn("server.myservice."); service.init(server); assertEquals(service.getPrefixedName("foo"),"server.myservice.foo"); assertEquals(service.getServiceConfig().size(),1); assertEquals(service.getServiceConfig().get("foo"),"FOO"); assertTrue(MyService.INIT); }

Class: org.apache.hadoop.lib.server.TestServer

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** Server status must transition UNDEF -> NORMAL -> SHUTDOWN across init() and destroy(). */
@Test
@TestDir
public void lifeCycle() throws Exception {
  Configuration serverConf = new Configuration(false);
  serverConf.set("server.services", LifeCycleService.class.getName());
  Server testServer = createServer(serverConf);

  assertEquals(testServer.getStatus(), Server.Status.UNDEF);
  testServer.init();
  assertNotNull(testServer.get(LifeCycleService.class));
  assertEquals(testServer.getStatus(), Server.Status.NORMAL);
  testServer.destroy();
  assertEquals(testServer.getStatus(), Server.Status.SHUTDOWN);
}

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/** A registered service must see exactly init, postInit, serverStatusChange, destroy — in that order. */
@Test
@TestDir
public void serviceLifeCycle() throws Exception {
  TestService.LIFECYCLE.clear();
  Configuration serverConf = new Configuration(false);
  serverConf.set("server.services", TestService.class.getName());

  Server testServer = createServer(serverConf);
  testServer.init();
  assertNotNull(testServer.get(TestService.class));
  testServer.destroy();

  assertEquals(TestService.LIFECYCLE,
      Arrays.asList("init", "postInit", "serverStatusChange", "destroy"));
}

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exercises all four Server constructors: explicit home/config/log/temp dirs
// with and without a Configuration (config is non-null only when supplied),
// and the home-dir-only forms where config/log/temp default to the
// "/conf", "/log" and "/temp" subdirectories of the home dir. Name, prefix
// and prefixed-name derivation are verified for each variant.
@Test @TestDir public void constructorsGetters() throws Exception { Server server=new Server("server",getAbsolutePath("/a"),getAbsolutePath("/b"),getAbsolutePath("/c"),getAbsolutePath("/d"),new Configuration(false)); assertEquals(server.getHomeDir(),getAbsolutePath("/a")); assertEquals(server.getConfigDir(),getAbsolutePath("/b")); assertEquals(server.getLogDir(),getAbsolutePath("/c")); assertEquals(server.getTempDir(),getAbsolutePath("/d")); assertEquals(server.getName(),"server"); assertEquals(server.getPrefix(),"server"); assertEquals(server.getPrefixedName("name"),"server.name"); assertNotNull(server.getConfig()); server=new Server("server",getAbsolutePath("/a"),getAbsolutePath("/b"),getAbsolutePath("/c"),getAbsolutePath("/d")); assertEquals(server.getHomeDir(),getAbsolutePath("/a")); assertEquals(server.getConfigDir(),getAbsolutePath("/b")); assertEquals(server.getLogDir(),getAbsolutePath("/c")); assertEquals(server.getTempDir(),getAbsolutePath("/d")); assertEquals(server.getName(),"server"); assertEquals(server.getPrefix(),"server"); assertEquals(server.getPrefixedName("name"),"server.name"); assertNull(server.getConfig()); server=new Server("server",TestDirHelper.getTestDir().getAbsolutePath(),new Configuration(false)); assertEquals(server.getHomeDir(),TestDirHelper.getTestDir().getAbsolutePath()); assertEquals(server.getConfigDir(),TestDirHelper.getTestDir() + "/conf"); assertEquals(server.getLogDir(),TestDirHelper.getTestDir() + "/log"); assertEquals(server.getTempDir(),TestDirHelper.getTestDir() + "/temp"); assertEquals(server.getName(),"server"); assertEquals(server.getPrefix(),"server"); assertEquals(server.getPrefixedName("name"),"server.name"); assertNotNull(server.getConfig()); server=new Server("server",TestDirHelper.getTestDir().getAbsolutePath()); assertEquals(server.getHomeDir(),TestDirHelper.getTestDir().getAbsolutePath()); assertEquals(server.getConfigDir(),TestDirHelper.getTestDir() + "/conf"); 
assertEquals(server.getLogDir(),TestDirHelper.getTestDir() + "/log"); assertEquals(server.getTempDir(),TestDirHelper.getTestDir() + "/temp"); assertEquals(server.getName(),"server"); assertEquals(server.getPrefix(),"server"); assertEquals(server.getPrefixedName("name"),"server.name"); assertNull(server.getConfig()); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Exercises the full service-composition matrix via the shared ORDER log:
// no services configured (ORDER stays empty); two services init in declaration
// order, postInit after all inits, destroy in reverse order; a service that
// fails during init (MyService2) aborts startup and destroys the already
// initialized ones; "server.services.ext" replaces a base service with its
// extension (MyService1a); setService() after init destroys the old instance,
// inits the replacement and re-resolves lookups; setService() of an
// incompatible service fails with ServerException S09.
@Test @TestDir public void services() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); Configuration conf; Server server; ORDER.clear(); conf=new Configuration(false); server=new Server("server",dir,dir,dir,dir,conf); server.init(); assertEquals(ORDER.size(),0); ORDER.clear(); String services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); server.init(); assertEquals(server.get(MyService1.class).getInterface(),MyService1.class); assertEquals(server.get(MyService3.class).getInterface(),MyService3.class); assertEquals(ORDER.size(),4); assertEquals(ORDER.get(0),"s1.init"); assertEquals(ORDER.get(1),"s3.init"); assertEquals(ORDER.get(2),"s1.postInit"); assertEquals(ORDER.get(3),"s3.postInit"); server.destroy(); assertEquals(ORDER.size(),6); assertEquals(ORDER.get(4),"s3.destroy"); assertEquals(ORDER.get(5),"s1.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService2.class.getName(),MyService3.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); try { server.init(); fail(); } catch ( ServerException ex) { assertEquals(MyService2.class,ex.getError().getClass()); } catch ( Exception ex) { fail(); } assertEquals(ORDER.size(),3); assertEquals(ORDER.get(0),"s1.init"); assertEquals(ORDER.get(1),"s2.init"); assertEquals(ORDER.get(2),"s1.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService5.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); server.init(); assertEquals(ORDER.size(),4); assertEquals(ORDER.get(0),"s1.init"); assertEquals(ORDER.get(1),"s5.init"); assertEquals(ORDER.get(2),"s1.postInit"); 
assertEquals(ORDER.get(3),"s5.postInit"); server.destroy(); assertEquals(ORDER.size(),6); assertEquals(ORDER.get(4),"s5.destroy"); assertEquals(ORDER.get(5),"s1.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName())); String servicesExt=StringUtils.join(",",Arrays.asList(MyService1a.class.getName())); conf=new Configuration(false); conf.set("server.services",services); conf.set("server.services.ext",servicesExt); server=new Server("server",dir,dir,dir,dir,conf); server.init(); assertEquals(server.get(MyService1.class).getClass(),MyService1a.class); assertEquals(ORDER.size(),4); assertEquals(ORDER.get(0),"s1a.init"); assertEquals(ORDER.get(1),"s3.init"); assertEquals(ORDER.get(2),"s1a.postInit"); assertEquals(ORDER.get(3),"s3.postInit"); server.destroy(); assertEquals(ORDER.size(),6); assertEquals(ORDER.get(4),"s3.destroy"); assertEquals(ORDER.get(5),"s1a.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); server.init(); server.setService(MyService1a.class); assertEquals(ORDER.size(),6); assertEquals(ORDER.get(4),"s1.destroy"); assertEquals(ORDER.get(5),"s1a.init"); assertEquals(server.get(MyService1.class).getClass(),MyService1a.class); server.destroy(); assertEquals(ORDER.size(),8); assertEquals(ORDER.get(6),"s3.destroy"); assertEquals(ORDER.get(7),"s1a.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); server.init(); server.setService(MyService5.class); assertEquals(ORDER.size(),5); assertEquals(ORDER.get(4),"s5.init"); assertEquals(server.get(MyService5.class).getClass(),MyService5.class); server.destroy(); 
assertEquals(ORDER.size(),8); assertEquals(ORDER.get(5),"s5.destroy"); assertEquals(ORDER.get(6),"s3.destroy"); assertEquals(ORDER.get(7),"s1.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService3.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); server.init(); try { server.setService(MyService7.class); fail(); } catch ( ServerException ex) { assertEquals(ServerException.ERROR.S09,ex.getError()); } catch ( Exception ex) { fail(); } assertEquals(ORDER.size(),6); assertEquals(ORDER.get(4),"s3.destroy"); assertEquals(ORDER.get(5),"s1.destroy"); ORDER.clear(); services=StringUtils.join(",",Arrays.asList(MyService1.class.getName(),MyService6.class.getName())); conf=new Configuration(false); conf.set("server.services",services); server=new Server("server",dir,dir,dir,dir,conf); server.init(); assertEquals(server.get(MyService1.class).getInterface(),MyService1.class); assertEquals(server.get(MyService6.class).getInterface(),MyService6.class); server.destroy(); }

InternalCallVerifier EqualityVerifier 
/** With no site file or system-property override, "testserver.a" must come from the bundled defaults. */
@Test
@TestDir
public void loadingDefaultConfig() throws Exception {
  String baseDir = TestDirHelper.getTestDir().getAbsolutePath();
  Server testServer = new Server("testserver", baseDir, baseDir, baseDir, baseDir);
  testServer.init();
  assertEquals(testServer.getConfig().get("testserver.a"), "default");
}

InternalCallVerifier EqualityVerifier 
/**
 * A JVM system property must override the same key coming from the site
 * configuration file ("testserver.a" resolves to "sysprop", not "site").
 */
@Test
@TestDir
public void loadingSysPropConfig() throws Exception {
  try {
    System.setProperty("testserver.a", "sysprop");
    String dir = TestDirHelper.getTestDir().getAbsolutePath();
    File configFile = new File(dir, "testserver-site.xml");
    // NOTE(review): this literal looks like site XML whose markup was
    // stripped by whatever produced this file — confirm against the
    // original source before relying on the file's contents.
    Writer w = new FileWriter(configFile);
    try {
      w.write("testserver.asite");
    } finally {
      // Fix: close the writer even if write() throws, so the file handle
      // is not leaked on failure.
      w.close();
    }
    Server server = new Server("testserver", dir, dir, dir, dir);
    server.init();
    assertEquals(server.getConfig().get("testserver.a"), "sysprop");
  } finally {
    // Always undo the system property so other tests are not affected.
    System.getProperties().remove("testserver.a");
  }
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** A configured "server.startup.status" of ADMIN must be the status after init() instead of NORMAL. */
@Test
@TestDir
public void startWithStatusNotNormal() throws Exception {
  Configuration serverConf = new Configuration(false);
  serverConf.set("server.startup.status", "ADMIN");

  Server testServer = createServer(serverConf);
  testServer.init();
  assertEquals(testServer.getStatus(), Server.Status.ADMIN);
  testServer.destroy();
}

InternalCallVerifier EqualityVerifier 
/**
 * Values from the "<name>-site.xml" file in the config dir must override the
 * bundled defaults ("testserver.a" resolves to "site").
 */
@Test
@TestDir
public void loadingSiteConfig() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  File configFile = new File(dir, "testserver-site.xml");
  // NOTE(review): this literal looks like site XML whose markup was stripped
  // by whatever produced this file — confirm against the original source.
  Writer w = new FileWriter(configFile);
  try {
    w.write("testserver.asite");
  } finally {
    // Fix: close the writer even if write() throws, so the file handle is
    // not leaked on failure.
    w.close();
  }
  Server server = new Server("testserver", dir, dir, dir, dir);
  server.init();
  assertEquals(server.getConfig().get("testserver.a"), "site");
}

Class: org.apache.hadoop.lib.service.hadoop.TestFileSystemAccessService

UtilityVerifier EqualityVerifier HybridVerifier 
// An executor that throws IOException must surface as FileSystemAccessException
// H03, and — with cache purge timeout 0 — the FileSystem handed to the executor
// must be closed afterwards (its mkdirs is expected to fail with IOException;
// the empty catch blocks are intentional: the IOException is the success case).
@Test @TestDir @TestHdfs public void fileSystemExecutorException() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName())); Configuration hadoopConf=new Configuration(false); hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); createHadoopConf(hadoopConf); Configuration conf=new Configuration(false); conf.set("server.services",services); conf.set("server.hadoop.filesystem.cache.purge.timeout","0"); Server server=new Server("server",dir,dir,dir,dir,conf); server.init(); FileSystemAccess hadoop=server.get(FileSystemAccess.class); final FileSystem fsa[]=new FileSystem[1]; try { hadoop.execute("u",hadoop.getFileSystemConfiguration(),new FileSystemAccess.FileSystemExecutor(){ @Override public Void execute( FileSystem fs) throws IOException { fsa[0]=fs; throw new IOException(); } } ); Assert.fail(); } catch ( FileSystemAccessException ex) { Assert.assertEquals(ex.getError(),FileSystemAccessException.ERROR.H03); } catch ( Exception ex) { Assert.fail(); } try { fsa[0].mkdirs(new Path("/tmp/foo")); Assert.fail(); } catch ( IOException ex) { } catch ( Exception ex) { Assert.fail(); } server.destroy(); }

UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
// With purge frequency/timeout of 1s, two createFileSystem calls for the same
// user must return the same cached FileSystem (fs1 == fs2), the instance must
// stay usable while still referenced (even after releaseFileSystem of one
// handle), and once the last handle is released and the purge interval passes,
// using it must fail with IOException (the empty catch is the expected path).
@Test @TestDir @TestHdfs public void fileSystemCache() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName())); Configuration hadoopConf=new Configuration(false); hadoopConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY)); createHadoopConf(hadoopConf); Configuration conf=new Configuration(false); conf.set("server.services",services); conf.set("server.hadoop.filesystem.cache.purge.frequency","1"); conf.set("server.hadoop.filesystem.cache.purge.timeout","1"); Server server=new Server("server",dir,dir,dir,dir,conf); try { server.init(); FileSystemAccess hadoop=server.get(FileSystemAccess.class); FileSystem fs1=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration()); Assert.assertNotNull(fs1); fs1.mkdirs(new Path("/tmp/foo1")); hadoop.releaseFileSystem(fs1); fs1.mkdirs(new Path("/tmp/foo2")); FileSystem fs2=hadoop.createFileSystem("u",hadoop.getFileSystemConfiguration()); Assert.assertEquals(fs1,fs2); Thread.sleep(4 * 1000); fs1.mkdirs(new Path("/tmp/foo2")); Thread.sleep(4 * 1000); fs2.mkdirs(new Path("/tmp/foo")); hadoop.releaseFileSystem(fs2); Thread.sleep(4 * 1000); try { fs2.mkdirs(new Path("/tmp/foo")); Assert.fail(); } catch ( IOException ex) { } catch ( Exception ex) { Assert.fail(); } } finally { server.destroy(); } }

InternalCallVerifier EqualityVerifier 
/**
 * A custom "server.hadoop.config.dir" pointing at a directory containing an
 * hdfs-site.xml must be picked up by FileSystemAccessService: the property
 * foo=BAR written to that file has to appear in serviceHadoopConf.
 */
@Test @TestDir public void serviceHadoopConfCustomDir() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); String hadoopConfDir=new File(dir,"confx").getAbsolutePath(); new File(hadoopConfDir).mkdirs(); String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName())); Configuration conf=new Configuration(false); conf.set("server.services",services); conf.set("server.hadoop.config.dir",hadoopConfDir); File hdfsSite=new File(hadoopConfDir,"hdfs-site.xml"); OutputStream os=new FileOutputStream(hdfsSite); Configuration hadoopConf=new Configuration(false); hadoopConf.set("foo","BAR"); hadoopConf.writeXml(os); os.close(); Server server=new Server("server",dir,dir,dir,dir,conf); server.init(); FileSystemAccessService fsAccess=(FileSystemAccessService)server.get(FileSystemAccess.class); Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"),"BAR"); server.destroy(); }

InternalCallVerifier EqualityVerifier 
/**
 * Without a custom config dir, FileSystemAccessService loads Hadoop config from
 * its default location and exposes it via serviceHadoopConf.
 * NOTE(review): the expected value foo=FOO is not written by this method —
 * presumably a conf file created by shared test setup; confirm before editing.
 */
@Test @TestDir public void serviceHadoopConf() throws Exception { String dir=TestDirHelper.getTestDir().getAbsolutePath(); String services=StringUtils.join(",",Arrays.asList(InstrumentationService.class.getName(),SchedulerService.class.getName(),FileSystemAccessService.class.getName())); Configuration conf=new Configuration(false); conf.set("server.services",services); Server server=new Server("server",dir,dir,dir,dir,conf); server.init(); FileSystemAccessService fsAccess=(FileSystemAccessService)server.get(FileSystemAccess.class); Assert.assertEquals(fsAccess.serviceHadoopConf.get("foo"),"FOO"); server.destroy(); }

Class: org.apache.hadoop.lib.service.instrumentation.TestInstrumentationService

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Sampler keeps a sliding window of 4 samples of a variable and reports their
 * mean via getRate(): averages grow as the window fills, then the oldest sample
 * (the initial 0) is evicted on the 5th sample. JSON output exposes "sampler"
 * (the rate) and "size" (window length) from both toJSONString() and
 * writeJSONString(). The sample sequence is order-dependent; documented in place.
 */
@Test public void sampler() throws Exception { final long value[]=new long[1]; Instrumentation.Variable var=new Instrumentation.Variable(){ @Override public Long getValue(){ return value[0]; } } ; InstrumentationService.Sampler sampler=new InstrumentationService.Sampler(); sampler.init(4,var); assertEquals(sampler.getRate(),0f,0.0001); sampler.sample(); assertEquals(sampler.getRate(),0f,0.0001); value[0]=1; sampler.sample(); assertEquals(sampler.getRate(),(0d + 1) / 2,0.0001); value[0]=2; sampler.sample(); assertEquals(sampler.getRate(),(0d + 1 + 2) / 3,0.0001); value[0]=3; sampler.sample(); assertEquals(sampler.getRate(),(0d + 1 + 2+ 3) / 4,0.0001); value[0]=4; sampler.sample(); assertEquals(sampler.getRate(),(4d + 1 + 2+ 3) / 4,0.0001); JSONObject json=(JSONObject)new JSONParser().parse(sampler.toJSONString()); assertEquals(json.size(),2); assertEquals(json.get("sampler"),sampler.getRate()); assertEquals(json.get("size"),4L); StringWriter writer=new StringWriter(); sampler.writeJSONString(writer); writer.close(); json=(JSONObject)new JSONParser().parse(writer.toString()); assertEquals(json.size(),2); assertEquals(json.get("sampler"),sampler.getRate()); assertEquals(json.get("size"),4L); }

UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Cron state machine: start() records start/lapStart (idempotent while already
 * running), stop() accumulates elapsed lap time into "own", end() fixes "total",
 * and start()/stop() after end() must throw IllegalStateException. Wall-clock
 * values are asserted with a 20ms tolerance; the sleep/measure ordering is
 * load-bearing, so the code is left byte-identical and only documented.
 */
@Test public void cron(){ InstrumentationService.Cron cron=new InstrumentationService.Cron(); assertEquals(cron.start,0); assertEquals(cron.lapStart,0); assertEquals(cron.own,0); assertEquals(cron.total,0); long begin=Time.now(); assertEquals(cron.start(),cron); assertEquals(cron.start(),cron); assertEquals(cron.start,begin,20); assertEquals(cron.start,cron.lapStart); sleep(100); assertEquals(cron.stop(),cron); long end=Time.now(); long delta=end - begin; assertEquals(cron.own,delta,20); assertEquals(cron.total,0); assertEquals(cron.lapStart,0); sleep(100); long reStart=Time.now(); cron.start(); assertEquals(cron.start,begin,20); assertEquals(cron.lapStart,reStart,20); sleep(100); cron.stop(); long reEnd=Time.now(); delta+=reEnd - reStart; assertEquals(cron.own,delta,20); assertEquals(cron.total,0); assertEquals(cron.lapStart,0); cron.end(); assertEquals(cron.total,reEnd - begin,20); try { cron.start(); fail(); } catch ( IllegalStateException ex) { } catch ( Exception ex) { fail(); } try { cron.stop(); fail(); } catch ( IllegalStateException ex) { } catch ( Exception ex) { fail(); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Timer keeps the last 2 crons and reports LAST_TOTAL/LAST_OWN plus running
 * AVG_TOTAL/AVG_OWN, asserted within a 20ms tolerance; the JSON views
 * (toJSONString/writeJSONString) must mirror getValues(). Timing and
 * averaging order are load-bearing, so the code is documented in place.
 * NOTE(review): the extra cron.stop() just before the final addCron looks
 * redundant — the cron was already stopped a few statements earlier; confirm
 * Cron tolerates a second stop() before "fixing" it.
 */
@Test public void timer() throws Exception { InstrumentationService.Timer timer=new InstrumentationService.Timer(2); InstrumentationService.Cron cron=new InstrumentationService.Cron(); long ownStart; long ownEnd; long totalStart; long totalEnd; long ownDelta; long totalDelta; long avgTotal; long avgOwn; cron.start(); ownStart=Time.now(); totalStart=ownStart; ownDelta=0; sleep(100); cron.stop(); ownEnd=Time.now(); ownDelta+=ownEnd - ownStart; sleep(100); cron.start(); ownStart=Time.now(); sleep(100); cron.stop(); ownEnd=Time.now(); ownDelta+=ownEnd - ownStart; totalEnd=ownEnd; totalDelta=totalEnd - totalStart; avgTotal=totalDelta; avgOwn=ownDelta; timer.addCron(cron); long[] values=timer.getValues(); assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20); assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20); assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20); assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20); cron=new InstrumentationService.Cron(); cron.start(); ownStart=Time.now(); totalStart=ownStart; ownDelta=0; sleep(200); cron.stop(); ownEnd=Time.now(); ownDelta+=ownEnd - ownStart; sleep(200); cron.start(); ownStart=Time.now(); sleep(200); cron.stop(); ownEnd=Time.now(); ownDelta+=ownEnd - ownStart; totalEnd=ownEnd; totalDelta=totalEnd - totalStart; avgTotal=(avgTotal * 1 + totalDelta) / 2; avgOwn=(avgOwn * 1 + ownDelta) / 2; timer.addCron(cron); values=timer.getValues(); assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20); assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20); assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20); assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20); avgTotal=totalDelta; avgOwn=ownDelta; cron=new InstrumentationService.Cron(); cron.start(); ownStart=Time.now(); totalStart=ownStart; ownDelta=0; sleep(300); cron.stop(); ownEnd=Time.now(); ownDelta+=ownEnd - ownStart; 
sleep(300); cron.start(); ownStart=Time.now(); sleep(300); cron.stop(); ownEnd=Time.now(); ownDelta+=ownEnd - ownStart; totalEnd=ownEnd; totalDelta=totalEnd - totalStart; avgTotal=(avgTotal * 1 + totalDelta) / 2; avgOwn=(avgOwn * 1 + ownDelta) / 2; cron.stop(); timer.addCron(cron); values=timer.getValues(); assertEquals(values[InstrumentationService.Timer.LAST_TOTAL],totalDelta,20); assertEquals(values[InstrumentationService.Timer.LAST_OWN],ownDelta,20); assertEquals(values[InstrumentationService.Timer.AVG_TOTAL],avgTotal,20); assertEquals(values[InstrumentationService.Timer.AVG_OWN],avgOwn,20); JSONObject json=(JSONObject)new JSONParser().parse(timer.toJSONString()); assertEquals(json.size(),4); assertEquals(json.get("lastTotal"),values[InstrumentationService.Timer.LAST_TOTAL]); assertEquals(json.get("lastOwn"),values[InstrumentationService.Timer.LAST_OWN]); assertEquals(json.get("avgTotal"),values[InstrumentationService.Timer.AVG_TOTAL]); assertEquals(json.get("avgOwn"),values[InstrumentationService.Timer.AVG_OWN]); StringWriter writer=new StringWriter(); timer.writeJSONString(writer); writer.close(); json=(JSONObject)new JSONParser().parse(writer.toString()); assertEquals(json.size(),4); assertEquals(json.get("lastTotal"),values[InstrumentationService.Timer.LAST_TOTAL]); assertEquals(json.get("lastOwn"),values[InstrumentationService.Timer.LAST_OWN]); assertEquals(json.get("avgTotal"),values[InstrumentationService.Timer.AVG_TOTAL]); assertEquals(json.get("avgOwn"),values[InstrumentationService.Timer.AVG_OWN]); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * VariableHolder serializes its wrapped Variable as a single-entry JSON map
 * {"value": ...}; both toJSONString() and writeJSONString() must agree.
 */
@Test
public void variableHolder() throws Exception {
  InstrumentationService.VariableHolder holder =
      new InstrumentationService.VariableHolder();
  holder.var = new Instrumentation.Variable() {
    @Override
    public String getValue() {
      return "foo";
    }
  };

  // String form.
  JSONObject parsed = (JSONObject) new JSONParser().parse(holder.toJSONString());
  assertEquals(parsed.size(), 1);
  assertEquals(parsed.get("value"), "foo");

  // Writer form must produce the same JSON.
  StringWriter sw = new StringWriter();
  holder.writeJSONString(sw);
  sw.close();
  parsed = (JSONObject) new JSONParser().parse(sw.toString());
  assertEquals(parsed.size(), 1);
  assertEquals(parsed.get("value"), "foo");
}

Class: org.apache.hadoop.lib.servlet.TestMDCFilter

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * MDCFilter must populate the MDC with hostname/user/method/path for the
 * duration of doFilter and clear all four afterwards. Three passes: no
 * principal and no hostname (both null in MDC), then a principal ("name"),
 * then additionally HostnameFilter's thread-local hostname ("HOST").
 * Mock/thread-local setup order is load-bearing; documented in place.
 */
@Test public void mdc() throws Exception { HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getUserPrincipal()).thenReturn(null); Mockito.when(request.getMethod()).thenReturn("METHOD"); Mockito.when(request.getPathInfo()).thenReturn("/pathinfo"); ServletResponse response=Mockito.mock(ServletResponse.class); final AtomicBoolean invoked=new AtomicBoolean(); FilterChain chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertEquals(MDC.get("hostname"),null); assertEquals(MDC.get("user"),null); assertEquals(MDC.get("method"),"METHOD"); assertEquals(MDC.get("path"),"/pathinfo"); invoked.set(true); } } ; MDC.clear(); Filter filter=new MDCFilter(); filter.init(null); filter.doFilter(request,response,chain); assertTrue(invoked.get()); assertNull(MDC.get("hostname")); assertNull(MDC.get("user")); assertNull(MDC.get("method")); assertNull(MDC.get("path")); Mockito.when(request.getUserPrincipal()).thenReturn(new Principal(){ @Override public String getName(){ return "name"; } } ); invoked.set(false); chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertEquals(MDC.get("hostname"),null); assertEquals(MDC.get("user"),"name"); assertEquals(MDC.get("method"),"METHOD"); assertEquals(MDC.get("path"),"/pathinfo"); invoked.set(true); } } ; filter.doFilter(request,response,chain); assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.set("HOST"); invoked.set(false); chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { assertEquals(MDC.get("hostname"),"HOST"); assertEquals(MDC.get("user"),"name"); assertEquals(MDC.get("method"),"METHOD"); assertEquals(MDC.get("path"),"/pathinfo"); invoked.set(true); } } ; 
filter.doFilter(request,response,chain); assertTrue(invoked.get()); HostnameFilter.HOSTNAME_TL.remove(); filter.destroy(); }

Class: org.apache.hadoop.lib.servlet.TestServerWebApp

EqualityVerifier 
/**
 * ServerWebApp.getHomeDir reads the "<name>.home.dir" system property, and
 * getDir falls back to the supplied default until "<name><suffix>" is set.
 */
@Test
public void getHomeDir() {
  System.setProperty("TestServerWebApp0.home.dir", "/tmp");
  // Fix: JUnit's assertEquals takes (expected, actual); the original had the
  // arguments reversed, which makes failure messages misleading.
  assertEquals("/tmp", ServerWebApp.getHomeDir("TestServerWebApp0"));
  // No explicit log dir yet -> default is returned.
  assertEquals("/tmp/log", ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"));
  System.setProperty("TestServerWebApp0.log.dir", "/tmplog");
  // Explicit property wins over the default.
  assertEquals("/tmplog", ServerWebApp.getDir("TestServerWebApp0", ".log.dir", "/tmp/log"));
}

InternalCallVerifier EqualityVerifier 
/**
 * resolveAuthority() must combine the configured http.hostname and http.port
 * system properties into an InetSocketAddress.
 */
@Test
@TestDir
public void testResolveAuthority() throws Exception {
  final String dir = TestDirHelper.getTestDir().getAbsolutePath();
  final String name = "TestServerWebApp3";
  System.setProperty(name + ".home.dir", dir);
  System.setProperty(name + ".config.dir", dir);
  System.setProperty(name + ".log.dir", dir);
  System.setProperty(name + ".temp.dir", dir);
  System.setProperty("testserverwebapp3.http.hostname", "localhost");
  System.setProperty("testserverwebapp3.http.port", "14000");

  ServerWebApp webApp = new ServerWebApp(name) { };
  InetSocketAddress authority = webApp.resolveAuthority();
  Assert.assertEquals("localhost", authority.getHostName());
  Assert.assertEquals(14000, authority.getPort());
}

InternalCallVerifier EqualityVerifier 
/**
 * ServerWebApp lifecycle: UNDEF before the servlet context starts, NORMAL
 * after contextInitialized, SHUTDOWN after contextDestroyed.
 */
@Test
@TestDir
public void lifecycle() throws Exception {
  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  System.setProperty("TestServerWebApp1.home.dir", dir);
  System.setProperty("TestServerWebApp1.config.dir", dir);
  System.setProperty("TestServerWebApp1.log.dir", dir);
  System.setProperty("TestServerWebApp1.temp.dir", dir);
  ServerWebApp server = new ServerWebApp("TestServerWebApp1") { };
  // Fix: assertEquals takes (expected, actual); the original had them
  // reversed, producing misleading failure messages.
  assertEquals(Server.Status.UNDEF, server.getStatus());
  server.contextInitialized(null);
  assertEquals(Server.Status.NORMAL, server.getStatus());
  server.contextDestroyed(null);
  assertEquals(Server.Status.SHUTDOWN, server.getStatus());
}

Class: org.apache.hadoop.lib.util.TestCheck

EqualityVerifier 
/**
 * Check.validIdentifier returns valid identifiers (letters, digits,
 * underscore, within the length limit) unchanged.
 */
@Test
public void validIdentifierValid() throws Exception {
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals("a", Check.validIdentifier("a", 1, ""));
  assertEquals("a1", Check.validIdentifier("a1", 2, ""));
  assertEquals("a_", Check.validIdentifier("a_", 3, ""));
  assertEquals("_", Check.validIdentifier("_", 1, ""));
}

EqualityVerifier 
/** Check.ge0 returns non-negative values unchanged (boundary: 0). */
@Test
public void checkGEZero() {
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals(120, Check.ge0(120, "test"));
  assertEquals(0, Check.ge0(0, "test"));
}

EqualityVerifier 
/**
 * Check.notEmpty returns a non-empty string unchanged.
 * NOTE(review): method name has a typo ("Emtpy"); kept to avoid changing the
 * test's externally visible name.
 */
@Test
public void notEmptyNotEmtpy() {
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals("value", Check.notEmpty("value", "name"));
}

EqualityVerifier 
/** Check.gt0 returns strictly positive values unchanged. */
@Test
public void checkGTZeroGreater() {
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals(120, Check.gt0(120, "test"));
}

EqualityVerifier 
/** Check.notNull returns a non-null value unchanged. */
@Test
public void notNullNotNull() {
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals("value", Check.notNull("value", "name"));
}

Class: org.apache.hadoop.lib.util.TestConfigurationUtils

InternalCallVerifier EqualityVerifier 
/**
 * Configuration variable expansion: getRaw() returns the literal value,
 * get() resolves ${var} references against other keys and system properties,
 * leaves undefined references (${aaa}) as-is, and an explicitly set key
 * shadows the matching system property.
 */
@Test
public void testVarResolutionAndSysProps() {
  String userName = System.getProperty("user.name");
  Configuration conf = new Configuration(false);
  conf.set("a", "A");
  conf.set("b", "${a}");
  conf.set("c", "${user.name}");
  conf.set("d", "${aaa}");
  // Fix: expected value first per JUnit convention (was reversed throughout).
  assertEquals("A", conf.getRaw("a"));
  assertEquals("${a}", conf.getRaw("b"));
  assertEquals("${user.name}", conf.getRaw("c"));
  assertEquals("A", conf.get("a"));
  assertEquals("A", conf.get("b"));
  assertEquals(userName, conf.get("c"));
  assertEquals("${aaa}", conf.get("d"));
  conf.set("user.name", "foo");
  assertEquals("foo", conf.get("user.name"));
}

InternalCallVerifier EqualityVerifier 
/**
 * A fresh Configuration(false) is empty; ConfigurationUtils.load populates it
 * from an input stream (here a single property a=A).
 */
@Test
public void constructors() throws Exception {
  Configuration conf = new Configuration(false);
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals(0, conf.size());
  byte[] bytes = "aA".getBytes();
  InputStream is = new ByteArrayInputStream(bytes);
  conf = new Configuration(false);
  ConfigurationUtils.load(conf, is);
  assertEquals(1, conf.size());
  assertEquals("A", conf.get("a"));
}

InternalCallVerifier EqualityVerifier 
@Test public void copy() throws Exception { Configuration srcConf=new Configuration(false); Configuration targetConf=new Configuration(false); srcConf.set("testParameter1","valueFromSource"); srcConf.set("testParameter2","valueFromSource"); targetConf.set("testParameter2","valueFromTarget"); targetConf.set("testParameter3","valueFromTarget"); ConfigurationUtils.copy(srcConf,targetConf); assertEquals("valueFromSource",targetConf.get("testParameter1")); assertEquals("valueFromSource",targetConf.get("testParameter2")); assertEquals("valueFromTarget",targetConf.get("testParameter3")); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * ConfigurationUtils.injectDefaults adds source-only keys to the target but
 * never overwrites keys the target already has; the source is not modified.
 */
@Test
public void injectDefaults() throws Exception {
  Configuration defaults = new Configuration(false);
  Configuration target = new Configuration(false);

  defaults.set("testParameter1", "valueFromSource");
  defaults.set("testParameter2", "valueFromSource");
  target.set("testParameter2", "originalValueFromTarget");
  target.set("testParameter3", "originalValueFromTarget");

  ConfigurationUtils.injectDefaults(defaults, target);

  // Target: only the missing key was injected.
  assertEquals("valueFromSource", target.get("testParameter1"));
  assertEquals("originalValueFromTarget", target.get("testParameter2"));
  assertEquals("originalValueFromTarget", target.get("testParameter3"));
  // Source is untouched.
  assertEquals("valueFromSource", defaults.get("testParameter1"));
  assertEquals("valueFromSource", defaults.get("testParameter2"));
  assertNull(defaults.get("testParameter3"));
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * ConfigurationUtils.resolve returns a Configuration in which every ${var}
 * reference has been expanded into the raw values.
 */
@Test
public void resolve() {
  Configuration conf = new Configuration(false);
  conf.set("a", "A");
  conf.set("b", "${a}");
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals("A", conf.getRaw("a"));
  assertEquals("${a}", conf.getRaw("b"));
  conf = ConfigurationUtils.resolve(conf);
  assertEquals("A", conf.getRaw("a"));
  assertEquals("A", conf.getRaw("b"));
}

Class: org.apache.hadoop.lib.wsrs.TestInputStreamEntity

EqualityVerifier 
/**
 * InputStreamEntity streams its wrapped InputStream to an OutputStream; with
 * (offset, length) it writes only that sub-range ("abc" offset 1 len 1 -> 'b').
 */
@Test
public void test() throws Exception {
  InputStream is = new ByteArrayInputStream("abc".getBytes());
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  InputStreamEntity i = new InputStreamEntity(is);
  i.write(baos);
  baos.close();
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals("abc", new String(baos.toByteArray()));

  is = new ByteArrayInputStream("abc".getBytes());
  baos = new ByteArrayOutputStream();
  i = new InputStreamEntity(is, 1, 1);
  i.write(baos);
  baos.close();
  assertEquals('b', baos.toByteArray()[0]);
}

Class: org.apache.hadoop.lib.wsrs.TestJSONMapProvider

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * JSONMapProvider is writeable only for Map types, reports unknown size (-1),
 * and serializes a map as compact JSON.
 */
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
  JSONMapProvider p = new JSONMapProvider();
  assertTrue(p.isWriteable(Map.class, null, null, null));
  assertFalse(p.isWriteable(this.getClass(), null, null, null));
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals(-1, p.getSize(null, null, null, null, null));
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  JSONObject json = new JSONObject();
  json.put("a", "A");
  p.writeTo(json, JSONObject.class, null, null, null, null, baos);
  baos.close();
  assertEquals("{\"a\":\"A\"}", new String(baos.toByteArray()).trim());
}

Class: org.apache.hadoop.lib.wsrs.TestJSONProvider

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * JSONProvider is writeable only for JSONObject types, reports unknown size
 * (-1), and serializes a JSONObject as compact JSON.
 */
@Test
@SuppressWarnings("unchecked")
public void test() throws Exception {
  JSONProvider p = new JSONProvider();
  assertTrue(p.isWriteable(JSONObject.class, null, null, null));
  assertFalse(p.isWriteable(this.getClass(), null, null, null));
  // Fix: expected value first per JUnit convention (was reversed).
  assertEquals(-1, p.getSize(null, null, null, null, null));
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  JSONObject json = new JSONObject();
  json.put("a", "A");
  p.writeTo(json, JSONObject.class, null, null, null, null, baos);
  baos.close();
  assertEquals("{\"a\":\"A\"}", new String(baos.toByteArray()).trim());
}

Class: org.apache.hadoop.lib.wsrs.TestParam

EqualityVerifier 
/**
 * ShortParam parses shorts via the shared test helper, rejecting non-numeric
 * input and overflow; the radix-8 variant parses "01777" as octal.
 */
@Test
public void testShort() throws Exception {
  Param param = new ShortParam("S", (short) 1) { };
  test(param, "S", "a short", (short) 1, (short) 2, "x", "" + ((int) Short.MAX_VALUE + 1));
  param = new ShortParam("S", (short) 1, 8) { };
  // Fix: Short.valueOf replaces the deprecated new Short(short) boxing
  // constructor; value is the octal literal 01777.
  assertEquals(Short.valueOf((short) 01777), param.parse("01777"));
}

Class: org.apache.hadoop.mapred.JobClientUnitTest

InternalCallVerifier EqualityVerifier 
/**
 * When the cluster has no job for the given ID, getCleanupTaskReports must
 * return an empty array (not null), after consulting the cluster once.
 */
@Test
public void testCleanupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getCleanupTaskReports(jobId);
  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}

InternalCallVerifier EqualityVerifier 
/**
 * When the cluster has no job for the given ID, getMapTaskReports must
 * return an empty array (not null), after consulting the cluster once.
 */
@Test
public void testMapTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getMapTaskReports(jobId);
  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}

InternalCallVerifier EqualityVerifier 
/**
 * When the cluster has no job for the given ID, getSetupTaskReports must
 * return an empty array (not null), after consulting the cluster once.
 */
@Test
public void testSetupTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getSetupTaskReports(jobId);
  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}

InternalCallVerifier EqualityVerifier 
/**
 * When the cluster has no job for the given ID, getReduceTaskReports must
 * return an empty array (not null), after consulting the cluster once.
 */
@Test
public void testReduceTaskReportsWithNullJob() throws Exception {
  TestJobClient client = new TestJobClient(new JobConf());
  Cluster cluster = mock(Cluster.class);
  client.setCluster(cluster);
  JobID jobId = new JobID("test", 0);
  when(cluster.getJob(jobId)).thenReturn(null);

  TaskReport[] reports = client.getReduceTaskReports(jobId);
  assertEquals(0, reports.length);
  verify(cluster).getJob(jobId);
}

Class: org.apache.hadoop.mapred.TestClientRedirect

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * End-to-end client redirect: the job client talks to the AM while it is up
 * (amContact set), reports empty counters while the AM is "restarting"
 * (amRestarting flag), reconnects to a freshly started AM, and finally falls
 * back to the history server (hsContact) once the AM is stopped.
 * Heavily stateful — shared flags mutated by the mock services plus 5s sleeps
 * for socket teardown — so statement order is load-bearing; documented only.
 */
@Test public void testRedirect() throws Exception { Configuration conf=new YarnConfiguration(); conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME); conf.set(YarnConfiguration.RM_ADDRESS,RMADDRESS); conf.set(JHAdminConfig.MR_HISTORY_ADDRESS,HSHOSTADDRESS); RMService rmService=new RMService("test"); rmService.init(conf); rmService.start(); AMService amService=new AMService(); amService.init(conf); amService.start(conf); HistoryService historyService=new HistoryService(); historyService.init(conf); historyService.start(conf); LOG.info("services started"); Cluster cluster=new Cluster(conf); org.apache.hadoop.mapreduce.JobID jobID=new org.apache.hadoop.mapred.JobID("201103121733",1); org.apache.hadoop.mapreduce.Counters counters=cluster.getJob(jobID).getCounters(); validateCounters(counters); Assert.assertTrue(amContact); LOG.info("Sleeping for 5 seconds before stop for" + " the client socket to not get EOF immediately.."); Thread.sleep(5000); amService.stop(); LOG.info("Sleeping for 5 seconds after stop for" + " the server to exit cleanly.."); Thread.sleep(5000); amRestarting=true; counters=cluster.getJob(jobID).getCounters(); Assert.assertEquals(0,counters.countCounters()); Job job=cluster.getJob(jobID); org.apache.hadoop.mapreduce.TaskID taskId=new org.apache.hadoop.mapreduce.TaskID(jobID,TaskType.MAP,0); TaskAttemptID tId=new TaskAttemptID(taskId,0); job.killJob(); job.killTask(tId); job.failTask(tId); job.getTaskCompletionEvents(0,100); job.getStatus(); job.getTaskDiagnostics(tId); job.getTaskReports(TaskType.MAP); job.getTrackingURL(); amRestarting=false; amService=new AMService(); amService.init(conf); amService.start(conf); amContact=false; counters=cluster.getJob(jobID).getCounters(); validateCounters(counters); Assert.assertTrue(amContact); amService.stop(); counters=cluster.getJob(jobID).getCounters(); validateCounters(counters); Assert.assertTrue(hsContact); rmService.stop(); historyService.stop(); }

Class: org.apache.hadoop.mapred.TestClientServiceDelegate

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * When the RM returns no application report (job finished/retired), the
 * delegate must serve job status from the history-server proxy, preserving
 * job file, tracking URL, and completed (1.0) map/reduce progress.
 */
@Test public void testJobReportFromHistoryServer() throws Exception { MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer()); ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null); ClientServiceDelegate clientServiceDelegate=getClientServiceDelegate(historyServerProxy,rm); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("TestJobFilePath",jobStatus.getJobFile()); Assert.assertEquals("http://TestTrackingUrl",jobStatus.getTrackingUrl()); Assert.assertEquals(1.0f,jobStatus.getMapProgress(),0.0f); Assert.assertEquals(1.0f,jobStatus.getReduceProgress(),0.0f); }

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * getJobStatus must retry AM failures: the stubbed AM proxy throws on the
 * first four getJobReport calls and succeeds on the fifth, which must stay
 * within MR_CLIENT_MAX_RETRIES. Skipped when the AM is not reachable from
 * the client (parameterized test mode).
 */
@Test public void testRetriesOnAMConnectionFailures() throws Exception { if (!isAMReachableFromClient) { return; } ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(getRunningApplicationReport("am1",78)); final MRClientProtocol amProxy=mock(MRClientProtocol.class); when(amProxy.getJobReport(any(GetJobReportRequest.class))).thenThrow(new RuntimeException("11")).thenThrow(new RuntimeException("22")).thenThrow(new RuntimeException("33")).thenThrow(new RuntimeException("44")).thenReturn(getJobReportResponse()); Configuration conf=new YarnConfiguration(); conf.set(MRConfig.FRAMEWORK_NAME,MRConfig.YARN_FRAMEWORK_NAME); conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,!isAMReachableFromClient); ClientServiceDelegate clientServiceDelegate=new ClientServiceDelegate(conf,rm,oldJobId,null){ @Override MRClientProtocol instantiateAMProxy( final InetSocketAddress serviceAddr) throws IOException { super.instantiateAMProxy(serviceAddr); return amProxy; } } ; JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES),clientServiceDelegate.getMaxClientRetry()); verify(amProxy,times(5)).getJobReport(any(GetJobReportRequest.class)); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * With JOB_AM_ACCESS_DISABLED the delegate must never instantiate an AM
 * proxy: three running-application reports each yield a stub "N/A" status,
 * and the final finished report is served from the history server — with
 * instantiateAMProxy verified at zero invocations throughout.
 * Skipped when the AM is reachable (parameterized test mode).
 */
@Test public void testAMAccessDisabled() throws IOException { if (isAMReachableFromClient) { return; } MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(getJobReportResponseFromHistoryServer()); ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class); try { when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getFinishedApplicationReport()); } catch ( YarnException e) { throw new IOException(e); } ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate)); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A",jobStatus.getJobName()); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A",jobStatus.getJobName()); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("N/A",jobStatus.getJobName()); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); JobStatus jobStatus1=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus1); Assert.assertEquals("TestJobFilePath",jobStatus1.getJobFile()); Assert.assertEquals("http://TestTrackingUrl",jobStatus1.getTrackingUrl()); Assert.assertEquals(1.0f,jobStatus1.getMapProgress(),0.0f); Assert.assertEquals(1.0f,jobStatus1.getReduceProgress(),0.0f); verify(clientServiceDelegate,times(0)).instantiateAMProxy(any(InetSocketAddress.class)); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * When the RM no longer knows the application, job counters must be fetched
 * from the history server ("dummyCounters"/"dummyCounter" == 1001).
 */
@Test public void testCountersFromHistoryServer() throws Exception { MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); when(historyServerProxy.getCounters(getCountersRequest())).thenReturn(getCountersResponseFromHistoryServer()); ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId())).thenReturn(null); ClientServiceDelegate clientServiceDelegate=getClientServiceDelegate(historyServerProxy,rm); Counters counters=TypeConverter.toYarn(clientServiceDelegate.getJobCounters(oldJobId)); Assert.assertNotNull(counters); Assert.assertEquals(1001,counters.getCounterGroup("dummyCounters").getCounter("dummyCounter").getValue()); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * On AM restart the delegate must detect the stale first-generation proxy
 * (its second getJobReport throws), poll the RM while no AM address is known,
 * then connect to the second-generation AM and keep using the cached proxy —
 * exactly two instantiateAMProxy calls in total. Skipped when the AM is not
 * reachable from the client (parameterized test mode). Mock-chain order is
 * load-bearing; documented in place.
 */
@Test public void testReconnectOnAMRestart() throws IOException { if (!isAMReachableFromClient) { return; } MRClientProtocol historyServerProxy=mock(MRClientProtocol.class); ResourceMgrDelegate rmDelegate=mock(ResourceMgrDelegate.class); try { when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(getRunningApplicationReport("am1",78)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport(null,0)).thenReturn(getRunningApplicationReport("am2",90)); } catch ( YarnException e) { throw new IOException(e); } GetJobReportResponse jobReportResponse1=mock(GetJobReportResponse.class); when(jobReportResponse1.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-firstGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,"")); MRClientProtocol firstGenAMProxy=mock(MRClientProtocol.class); when(firstGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse1).thenThrow(new RuntimeException("AM is down!")); GetJobReportResponse jobReportResponse2=mock(GetJobReportResponse.class); when(jobReportResponse2.getJobReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"jobName-secondGen","user",JobState.RUNNING,0,0,0,0,0,0,0,"anything",null,false,"")); MRClientProtocol secondGenAMProxy=mock(MRClientProtocol.class); when(secondGenAMProxy.getJobReport(any(GetJobReportRequest.class))).thenReturn(jobReportResponse2); ClientServiceDelegate clientServiceDelegate=spy(getClientServiceDelegate(historyServerProxy,rmDelegate)); doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class)); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("jobName-firstGen",jobStatus.getJobName()); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("jobName-secondGen",jobStatus.getJobName()); 
jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertNotNull(jobStatus); Assert.assertEquals("jobName-secondGen",jobStatus.getJobName()); verify(clientServiceDelegate,times(2)).instantiateAMProxy(any(InetSocketAddress.class)); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Without a configured history server: a job unknown to the RM reports
 * state PREP with username "N/A", while a finished application report maps
 * to SUCCEEDED with the report's user.
 */
@Test public void testHistoryServerNotConfigured() throws Exception { ClientServiceDelegate clientServiceDelegate=getClientServiceDelegate(null,getRMDelegate()); JobStatus jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertEquals("N/A",jobStatus.getUsername()); Assert.assertEquals(JobStatus.State.PREP,jobStatus.getState()); ResourceMgrDelegate rm=mock(ResourceMgrDelegate.class); ApplicationReport applicationReport=getFinishedApplicationReport(); when(rm.getApplicationReport(jobId.getAppId())).thenReturn(applicationReport); clientServiceDelegate=getClientServiceDelegate(null,rm); jobStatus=clientServiceDelegate.getJobStatus(oldJobId); Assert.assertEquals(applicationReport.getUser(),jobStatus.getUsername()); Assert.assertEquals(JobStatus.State.SUCCEEDED,jobStatus.getState()); }

Class: org.apache.hadoop.mapred.TestClock

InternalCallVerifier EqualityVerifier 
/**
 * Clock.getTime() should track System.currentTimeMillis(); a 30ms tolerance
 * absorbs scheduling jitter between the two reads.
 */
@Test(timeout = 1000)
public void testClock() {
  Clock clock = new Clock();
  long reference = System.currentTimeMillis();
  long observed = clock.getTime();
  assertEquals(reference, observed, 30);
}

Class: org.apache.hadoop.mapred.TestClusterStatus

EqualityVerifier 
/** The cluster status fixture must report the JobTracker as RUNNING. */
@SuppressWarnings("deprecation")
@Test(timeout = 1000)
public void testJobTrackerState() {
  JobTracker.State state = clusterStatus.getJobTrackerState();
  Assert.assertEquals(JobTracker.State.RUNNING, state);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * A fresh cluster status has no graylisted trackers: count is zero and the
 * name collection is empty.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 1000)
public void testGraylistedTrackers() {
  Assert.assertEquals(0, clusterStatus.getGraylistedTrackers());
  Assert.assertTrue(clusterStatus.getGraylistedTrackerNames().isEmpty());
}

Class: org.apache.hadoop.mapred.TestCombineFileInputFormat

InternalCallVerifier EqualityVerifier 
/**
 * CombineFileInputFormat must coalesce the single small input file into
 * exactly the one requested split (SIZE_SPLITS == 1). The anonymous subclass
 * only wires up a CombineFileRecordReader; documented in place because the
 * anonymous-class wiring is easy to break in a rewrite.
 */
@Test @SuppressWarnings("unchecked") public void testSplits() throws IOException { JobConf job=new JobConf(defaultConf); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"test.txt"),"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n"); FileInputFormat.setInputPaths(job,workDir); CombineFileInputFormat format=new CombineFileInputFormat(){ @Override public RecordReader getRecordReader( InputSplit split, JobConf job, Reporter reporter) throws IOException { return new CombineFileRecordReader(job,(CombineFileSplit)split,reporter,CombineFileRecordReader.class); } } ; final int SIZE_SPLITS=1; LOG.info("Trying to getSplits with splits = " + SIZE_SPLITS); InputSplit[] splits=format.getSplits(job,SIZE_SPLITS); LOG.info("Got getSplits = " + splits.length); assertEquals("splits == " + SIZE_SPLITS,SIZE_SPLITS,splits.length); }

Class: org.apache.hadoop.mapred.TestCombineSequenceFileInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Generates 10 sequence files of 10000 keyed records (seeded Random, seed logged for
// reproducibility), then over 3 rounds verifies CombineSequenceFileInputFormat always
// combines them into a single CombineFileSplit and that reading it yields every key
// exactly once (BitSet cardinality check).
@Test(timeout=10000) public void testFormat() throws Exception { JobConf job=new JobConf(conf); Reporter reporter=Reporter.NULL; Random random=new Random(); long seed=random.nextLong(); LOG.info("seed = " + seed); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random); InputFormat format=new CombineSequenceFileInputFormat(); IntWritable key=new IntWritable(); BytesWritable value=new BytesWritable(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1; LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(job,numSplits); LOG.info("splitting: got = " + splits.length); assertEquals("We got more than one splits!",1,splits.length); InputSplit split=splits[0]; assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); RecordReader reader=format.getRecordReader(split,job,reporter); try { while (reader.next(key,value)) { assertFalse("Key in multiple partitions.",bits.get(key.get())); bits.set(key.get()); } } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

Class: org.apache.hadoop.mapred.TestCombineTextInputFormat

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Same exhaustive-coverage pattern as the sequence-file variant, but for
// CombineTextInputFormat: 10 files x 10000 integer lines, expect one combined split per
// request, and every value read back exactly once (conflicts logged with the reader position
// before the assertion fires).
@Test(timeout=10000) public void testFormat() throws Exception { JobConf job=new JobConf(defaultConf); Random random=new Random(); long seed=random.nextLong(); LOG.info("seed = " + seed); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random); CombineTextInputFormat format=new CombineTextInputFormat(); LongWritable key=new LongWritable(); Text value=new Text(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / 20) + 1; LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(job,numSplits); LOG.info("splitting: got = " + splits.length); assertEquals("We got more than one splits!",1,splits.length); InputSplit split=splits[0]; assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); LOG.debug("split= " + split); RecordReader reader=format.getRecordReader(split,job,voidReporter); try { int count=0; while (reader.next(key,value)) { int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); if (bits.get(v)) { LOG.warn("conflict with " + v + " at position "+ reader.getPos()); } assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.info("splits=" + split + " count="+ count); } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
// Two gzip files combine into one split of 8 lines; since the order in which the two
// members appear inside the combined split is not fixed, the first token decides which
// expected-list ordering to verify against (any other first token fails the test).
/** * Test using the gzip codec for reading */ @Test(timeout=10000) public void testGzip() throws IOException { JobConf job=new JobConf(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,job); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n"); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n"); FileInputFormat.setInputPaths(job,workDir); CombineTextInputFormat format=new CombineTextInputFormat(); InputSplit[] splits=format.getSplits(job,100); assertEquals("compressed splits == 1",1,splits.length); List results=readSplit(format,splits[0],job); assertEquals("splits[0] length",8,results.size()); final String[] firstList={"the quick","brown","fox jumped","over"," the lazy"," dog"}; final String[] secondList={"this is a test","of gzip"}; String first=results.get(0).toString(); if (first.equals(firstList[0])) { testResults(results,firstList,secondList); } else if (first.equals(secondList[0])) { testResults(results,secondList,firstList); } else { fail("unexpected first token!"); } }

Class: org.apache.hadoop.mapred.TestConcatenatedCompressedInput

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Hand-parses a concatenated gzip file (magic bytes 0x1f 0x8b, method 8, then the FLG
// bits: FEXTRA=0x04, FNAME=0x08, FCOMMENT=0x10, FHCRC=0x02, reserved=0xe0) and inflates
// the first member with a raw java.util.zip.Inflater(true) (nowrap mode, since the gzip
// wrapper was consumed manually). The computed crc16 local is intentionally read-and-dropped:
// the test only walks past the optional header CRC, it does not validate it.
/** * Test using the raw Inflater codec for reading gzip files. */ @Test public void testPrototypeInflaterGzip() throws IOException { CompressionCodec gzip=new GzipCodec(); localFs.delete(workDir,true); System.out.println(COLOR_BR_BLUE + "testPrototypeInflaterGzip() using " + "non-native/Java Inflater and manual gzip header/trailer parsing"+ COLOR_NORMAL); final String fn="concat" + gzip.getDefaultExtension(); Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn); Path fnHDFS=new Path(workDir,fn); localFs.copyFromLocalFile(fnLocal,fnHDFS); final FileInputStream in=new FileInputStream(fnLocal.toString()); assertEquals("concat bytes available",148,in.available()); byte[] compressedBuf=new byte[256]; int numBytesRead=in.read(compressedBuf,0,10); assertEquals("header bytes read",10,numBytesRead); assertEquals("1st byte",0x1f,compressedBuf[0] & 0xff); assertEquals("2nd byte",0x8b,compressedBuf[1] & 0xff); assertEquals("3rd byte (compression method)",8,compressedBuf[2] & 0xff); byte flags=(byte)(compressedBuf[3] & 0xff); if ((flags & 0x04) != 0) { numBytesRead=in.read(compressedBuf,0,2); assertEquals("XLEN bytes read",2,numBytesRead); int xlen=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff; in.skip(xlen); } if ((flags & 0x08) != 0) { while ((numBytesRead=in.read()) != 0) { assertFalse("unexpected end-of-file while reading filename",numBytesRead == -1); } } if ((flags & 0x10) != 0) { while ((numBytesRead=in.read()) != 0) { assertFalse("unexpected end-of-file while reading comment",numBytesRead == -1); } } if ((flags & 0xe0) != 0) { assertTrue("reserved bits are set??",(flags & 0xe0) == 0); } if ((flags & 0x02) != 0) { numBytesRead=in.read(compressedBuf,0,2); assertEquals("CRC16 bytes read",2,numBytesRead); int crc16=((compressedBuf[1] << 8) | compressedBuf[0]) & 0xffff; } numBytesRead=in.read(compressedBuf); byte[] uncompressedBuf=new byte[256]; Inflater inflater=new Inflater(true); inflater.setInput(compressedBuf,0,numBytesRead); try { int 
numBytesUncompressed=inflater.inflate(uncompressedBuf); String outString=new String(uncompressedBuf,0,numBytesUncompressed,"UTF-8"); System.out.println("uncompressed data of first gzip member = [" + outString + "]"); } catch ( java.util.zip.DataFormatException ex) { throw new IOException(ex.getMessage()); } in.close(); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Reads a pre-built concatenated .bz2 fixture (from test.concat.data) plus a freshly
// written second file through TextInputFormat; normalizes split order by filename before
// asserting line counts and spot-checking content of each split.
/** * Test using the bzip2 codec for reading */ @Test public void testBzip2() throws IOException { JobConf jobConf=new JobConf(defaultConf); CompressionCodec bzip2=new BZip2Codec(); ReflectionUtils.setConf(bzip2,jobConf); localFs.delete(workDir,true); System.out.println(COLOR_BR_CYAN + "testBzip2() using non-native CBZip2InputStream (presumably)" + COLOR_NORMAL); final String fn="concat" + bzip2.getDefaultExtension(); Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn); Path fnHDFS=new Path(workDir,fn); localFs.copyFromLocalFile(fnLocal,fnHDFS); writeFile(localFs,new Path(workDir,"part2.txt.bz2"),bzip2,"this is a test\nof bzip2\n"); FileInputFormat.setInputPaths(jobConf,workDir); TextInputFormat format=new TextInputFormat(); format.configure(jobConf); format.setMinSplitSize(256); InputSplit[] splits=format.getSplits(jobConf,100); assertEquals("compressed splits == 2",2,splits.length); FileSplit tmp=(FileSplit)splits[0]; if (tmp.getPath().getName().equals("part2.txt.bz2")) { splits[0]=splits[1]; splits[1]=tmp; } List results=readSplit(format,splits[0],jobConf); assertEquals("splits[0] num lines",6,results.size()); assertEquals("splits[0][5]","member #3",results.get(5).toString()); results=readSplit(format,splits[1],jobConf); assertEquals("splits[1] num lines",2,results.size()); assertEquals("splits[1][0]","this is a test",results.get(0).toString()); assertEquals("splits[1][1]","of bzip2",results.get(1).toString()); }

InternalCallVerifier EqualityVerifier 
// Copies the two bzip2 fixtures (concat-then-compress and compress-then-concat), verifies
// their exact on-disk sizes (2567 / 3056 bytes), then delegates the real decoding checks to
// doMultipleBzip2BufferSizes across buffer sizes.
/** * Extended bzip2 test, similar to BuiltInGzipDecompressor test above. */ @Test public void testMoreBzip2() throws IOException { JobConf jobConf=new JobConf(defaultConf); CompressionCodec bzip2=new BZip2Codec(); ReflectionUtils.setConf(bzip2,jobConf); localFs.delete(workDir,true); System.out.println(COLOR_BR_MAGENTA + "testMoreBzip2() using non-native CBZip2InputStream (presumably)" + COLOR_NORMAL); String fn1="testConcatThenCompress.txt" + bzip2.getDefaultExtension(); Path fnLocal1=new Path(System.getProperty("test.concat.data","/tmp"),fn1); Path fnHDFS1=new Path(workDir,fn1); localFs.copyFromLocalFile(fnLocal1,fnHDFS1); String fn2="testCompressThenConcat.txt" + bzip2.getDefaultExtension(); Path fnLocal2=new Path(System.getProperty("test.concat.data","/tmp"),fn2); Path fnHDFS2=new Path(workDir,fn2); localFs.copyFromLocalFile(fnLocal2,fnHDFS2); FileInputFormat.setInputPaths(jobConf,workDir); final FileInputStream in1=new FileInputStream(fnLocal1.toString()); final FileInputStream in2=new FileInputStream(fnLocal2.toString()); assertEquals("concat bytes available",2567,in1.available()); assertEquals("concat bytes available",3056,in2.available()); doMultipleBzip2BufferSizes(jobConf,false); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// Exercises the native-zlib gzip decompressor on a concatenated gzip fixture. Skips (with a
// warning) when the codec's decompressor type is not BuiltInGzipDecompressor, i.e. when the
// native libs are not loaded; otherwise mirrors testBzip2's split-order-normalized checks.
/** * Test using Hadoop's original, native-zlib gzip codec for reading. */ @Test public void testGzip() throws IOException { JobConf jobConf=new JobConf(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,jobConf); localFs.delete(workDir,true); if (org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class == gzip.getDecompressorType()) { System.out.println(COLOR_BR_RED + "testGzip() using native-zlib Decompressor (" + gzip.getDecompressorType()+ ")"+ COLOR_NORMAL); } else { LOG.warn("testGzip() skipped: native (C/C++) libs not loaded"); return; } final String fn="concat" + gzip.getDefaultExtension(); Path fnLocal=new Path(System.getProperty("test.concat.data","/tmp"),fn); Path fnHDFS=new Path(workDir,fn); localFs.copyFromLocalFile(fnLocal,fnHDFS); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n"); FileInputFormat.setInputPaths(jobConf,workDir); TextInputFormat format=new TextInputFormat(); format.configure(jobConf); InputSplit[] splits=format.getSplits(jobConf,100); assertEquals("compressed splits == 2",2,splits.length); FileSplit tmp=(FileSplit)splits[0]; if (tmp.getPath().getName().equals("part2.txt.gz")) { splits[0]=splits[1]; splits[1]=tmp; } List results=readSplit(format,splits[0],jobConf); assertEquals("splits[0] num lines",6,results.size()); assertEquals("splits[0][5]","member #3",results.get(5).toString()); results=readSplit(format,splits[1],jobConf); assertEquals("splits[1] num lines",2,results.size()); assertEquals("splits[1][0]","this is a test",results.get(0).toString()); assertEquals("splits[1][1]","of gzip",results.get(1).toString()); }

InternalCallVerifier EqualityVerifier 
// Forces the pure-Java gzip path (io.native.lib.available=false), asserts the codec resolves
// to BuiltInGzipDecompressor, checks fixture sizes (2734/3413 bytes), line-counts one fixture
// through LineReader (5346 bytes / 84 lines), then runs the buffer-size sweeps both ways.
/** * Test using the new BuiltInGzipDecompressor codec for reading gzip files. */ @Test public void testBuiltInGzipDecompressor() throws IOException { JobConf jobConf=new JobConf(defaultConf); jobConf.setBoolean("io.native.lib.available",false); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,jobConf); localFs.delete(workDir,true); assertEquals("[non-native (Java) codec]",org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor.class,gzip.getDecompressorType()); System.out.println(COLOR_BR_YELLOW + "testBuiltInGzipDecompressor() using" + " non-native (Java Inflater) Decompressor ("+ gzip.getDecompressorType()+ ")"+ COLOR_NORMAL); String fn1="testConcatThenCompress.txt" + gzip.getDefaultExtension(); Path fnLocal1=new Path(System.getProperty("test.concat.data","/tmp"),fn1); Path fnHDFS1=new Path(workDir,fn1); localFs.copyFromLocalFile(fnLocal1,fnHDFS1); String fn2="testCompressThenConcat.txt" + gzip.getDefaultExtension(); Path fnLocal2=new Path(System.getProperty("test.concat.data","/tmp"),fn2); Path fnHDFS2=new Path(workDir,fn2); localFs.copyFromLocalFile(fnLocal2,fnHDFS2); FileInputFormat.setInputPaths(jobConf,workDir); final FileInputStream in1=new FileInputStream(fnLocal1.toString()); final FileInputStream in2=new FileInputStream(fnLocal2.toString()); assertEquals("concat bytes available",2734,in1.available()); assertEquals("concat bytes available",3413,in2.available()); CompressionInputStream cin2=gzip.createInputStream(in2); LineReader in=new LineReader(cin2); Text out=new Text(); int numBytes, totalBytes=0, lineNum=0; while ((numBytes=in.readLine(out)) > 0) { ++lineNum; totalBytes+=numBytes; } in.close(); assertEquals("total uncompressed bytes in concatenated test file",5346,totalBytes); assertEquals("total uncompressed lines in concatenated test file",84,lineNum); doMultipleGzipBufferSizes(jobConf,false); doMultipleGzipBufferSizes(jobConf,true); }

Class: org.apache.hadoop.mapred.TestCounters

InternalCallVerifier EqualityVerifier 
/**
 * Legacy {@link Counters#getGroupNames()} must expose both the old
 * "FileSystemCounters" alias and the new FileSystemCounter group name in
 * addition to any user-defined groups.
 */
@Test
public void testLegacyGetGroupNames() {
  Counters counters = new Counters();
  counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
  counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
  counters.incrCounter("group1", "counter1", 1);
  // Compare as sets: group iteration order is not part of the contract.
  HashSet actualGroups = new HashSet(counters.getGroupNames());
  HashSet expectedGroups = new HashSet();
  expectedGroups.add("group1");
  expectedGroups.add("FileSystemCounters");
  expectedGroups.add("org.apache.hadoop.mapreduce.FileSystemCounter");
  assertEquals(expectedGroups, actualGroups);
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies {@link Counters#makeCompactString()}: with a single counter the
 * compact form is exact; with two groups the group ordering is unspecified,
 * so both concatenation orders are accepted.
 */
@Test
public void testMakeCompactString() {
  final String GC1 = "group1.counter1:1";
  final String GC2 = "group2.counter2:3";
  Counters counters = new Counters();
  counters.incrCounter("group1", "counter1", 1);
  // Use the named constant instead of repeating the literal (was duplicated).
  assertEquals(GC1, counters.makeCompactString());
  counters.incrCounter("group2", "counter2", 3);
  String cs = counters.makeCompactString();
  assertTrue("Bad compact string",
      cs.equals(GC1 + ',' + GC2) || cs.equals(GC2 + ',' + GC1));
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// Property-style check of Counter setValue/increment/getValue over 100 random initial values
// with 10 random increments each; tracks the expected value in a local long and asserts
// after every mutation. Random is unseeded, so failures are not directly reproducible.
/** * Verify counter value works */ @SuppressWarnings("deprecation") @Test public void testCounterValue(){ Counters counters=new Counters(); final int NUMBER_TESTS=100; final int NUMBER_INC=10; final Random rand=new Random(); for (int i=0; i < NUMBER_TESTS; i++) { long initValue=rand.nextInt(); long expectedValue=initValue; Counter counter=counters.findCounter("foo","bar"); counter.setValue(initValue); assertEquals("Counter value is not initialized correctly",expectedValue,counter.getValue()); for (int j=0; j < NUMBER_INC; j++) { int incValue=rand.nextInt(); counter.increment(incValue); expectedValue+=incValue; assertEquals("Counter value is not incremented correctly",expectedValue,counter.getValue()); } expectedValue=rand.nextInt(); counter.setValue(expectedValue); assertEquals("Counter value is not set correctly",expectedValue,counter.getValue()); } }

Class: org.apache.hadoop.mapred.TestFadvisedFileRegion

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
// End-to-end check of FadvisedFileRegion.customShuffleTransfer: writes FILE_SIZE random bytes,
// transfers a mid-file window (position=2MiB, count=4MiB-1) to a target channel in a loop
// until the transfer returns 0, then re-reads the output and compares it byte-for-byte
// against the corresponding slice of the input buffer. All channels/files are released in
// finally blocks via IOUtils.cleanup; temp files and dirs are deleted at the end.
@Test(timeout=100000) public void testCustomShuffleTransfer() throws IOException { File absLogDir=new File("target",TestFadvisedFileRegion.class.getSimpleName() + "LocDir").getAbsoluteFile(); String testDirPath=StringUtils.join(Path.SEPARATOR,new String[]{absLogDir.getAbsolutePath(),"testCustomShuffleTransfer"}); File testDir=new File(testDirPath); testDir.mkdirs(); System.out.println(testDir.getAbsolutePath()); File inFile=new File(testDir,"fileIn.out"); File outFile=new File(testDir,"fileOut.out"); byte[] initBuff=new byte[FILE_SIZE]; Random rand=new Random(); rand.nextBytes(initBuff); FileOutputStream out=new FileOutputStream(inFile); try { out.write(initBuff); } finally { IOUtils.cleanup(LOG,out); } int position=2 * 1024 * 1024; int count=4 * 1024 * 1024 - 1; RandomAccessFile inputFile=null; RandomAccessFile targetFile=null; WritableByteChannel target=null; FadvisedFileRegion fileRegion=null; try { inputFile=new RandomAccessFile(inFile.getAbsolutePath(),"r"); targetFile=new RandomAccessFile(outFile.getAbsolutePath(),"rw"); target=targetFile.getChannel(); Assert.assertEquals(FILE_SIZE,inputFile.length()); fileRegion=new FadvisedFileRegion(inputFile,position,count,false,0,null,null,1024,false); customShuffleTransferCornerCases(fileRegion,target,count); long pos=0; long size; while ((size=fileRegion.customShuffleTransfer(target,pos)) > 0) { pos+=size; } Assert.assertEquals(count,(int)pos); Assert.assertEquals(count,targetFile.length()); } finally { if (fileRegion != null) { fileRegion.releaseExternalResources(); } IOUtils.cleanup(LOG,target); IOUtils.cleanup(LOG,targetFile); IOUtils.cleanup(LOG,inputFile); } byte[] buff=new byte[FILE_SIZE]; FileInputStream in=new FileInputStream(outFile); try { int total=in.read(buff,0,count); Assert.assertEquals(count,total); for (int i=0; i < count; i++) { Assert.assertEquals(initBuff[position + i],buff[i]); } } finally { IOUtils.cleanup(LOG,in); } inFile.delete(); outFile.delete(); testDir.delete(); absLogDir.delete(); }

Class: org.apache.hadoop.mapred.TestFileInputFormat

InternalCallVerifier EqualityVerifier 
// Verifies getSplits drives exactly one listLocatedStatus call on the mock "test:///"
// filesystem (counter starts at 0, ends at 1) and produces 2 splits. FS caching is enabled
// so the MockFileSystem instance is shared; FileSystem.closeAll() resets the cache after.
@Test public void testListLocatedStatus() throws Exception { Configuration conf=getConfiguration(); conf.setBoolean("fs.test.impl.disable.cache",false); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2"); MockFileSystem mockFs=(MockFileSystem)new Path("test:///").getFileSystem(conf); Assert.assertEquals("listLocatedStatus already called",0,mockFs.numListLocatedStatusCalls); JobConf job=new JobConf(conf); TextInputFormat fileInputFormat=new TextInputFormat(); fileInputFormat.configure(job); InputSplit[] splits=fileInputFormat.getSplits(job,1); Assert.assertEquals("Input splits are not correct",2,splits.length); Assert.assertEquals("listLocatedStatuss calls",1,mockFs.numListLocatedStatusCalls); FileSystem.closeAll(); }

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Pairs each split host with its SplitLocationInfo by name (location order is not fixed):
// "localhost" must be both on-disk and in-memory, "otherhost" on-disk only.
@Test public void testSplitLocationInfo() throws Exception { Configuration conf=getConfiguration(); conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2"); JobConf job=new JobConf(conf); TextInputFormat fileInputFormat=new TextInputFormat(); fileInputFormat.configure(job); FileSplit[] splits=(FileSplit[])fileInputFormat.getSplits(job,1); String[] locations=splits[0].getLocations(); Assert.assertEquals(2,locations.length); SplitLocationInfo[] locationInfo=splits[0].getLocationInfo(); Assert.assertEquals(2,locationInfo.length); SplitLocationInfo localhostInfo=locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1]; SplitLocationInfo otherhostInfo=locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1]; Assert.assertTrue(localhostInfo.isOnDisk()); Assert.assertTrue(localhostInfo.isInMemory()); Assert.assertTrue(otherhostInfo.isOnDisk()); Assert.assertFalse(otherhostInfo.isInMemory()); }

UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// listStatus on a missing input dir must throw InvalidInputException (an IOException
// subtype) whose message names the qualified missing path; the catch block checks both
// the exception type and the exact message.
@Test public void testListStatusErrorOnNonExistantDir() throws IOException { Configuration conf=new Configuration(); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat.configureTestErrorOnNonExistantDir(conf,localFs); JobConf jobConf=new JobConf(conf); TextInputFormat fif=new TextInputFormat(); fif.configure(jobConf); try { fif.listStatus(jobConf); Assert.fail("Expecting an IOException for a missing Input path"); } catch ( IOException e) { Path expectedExceptionPath=new Path(TEST_ROOT_DIR,"input2"); expectedExceptionPath=localFs.makeQualified(expectedExceptionPath); Assert.assertTrue(e instanceof InvalidInputException); Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(),e.getMessage()); } }

Class: org.apache.hadoop.mapred.TestFixedLengthInputFormat

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
// FixedLengthInputFormat with record length 5 over two gzip files of ten 5-char records
// each; split order is normalized by filename before asserting record counts and samples.
/** * Test using the gzip codec with two input files. */ @Test(timeout=5000) public void testGzipWithTwoInputs() throws IOException { CompressionCodec gzip=new GzipCodec(); localFs.delete(workDir,true); FixedLengthInputFormat format=new FixedLengthInputFormat(); JobConf job=new JobConf(defaultConf); format.setRecordLength(job,5); FileInputFormat.setInputPaths(job,workDir); ReflectionUtils.setConf(gzip,job); format.configure(job); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"one two threefour five six seveneightnine ten "); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"ten nine eightsevensix five four threetwo one "); InputSplit[] splits=format.getSplits(job,100); assertEquals("compressed splits == 2",2,splits.length); FileSplit tmp=(FileSplit)splits[0]; if (tmp.getPath().getName().equals("part2.txt.gz")) { splits[0]=splits[1]; splits[1]=tmp; } List results=readSplit(format,splits[0],job); assertEquals("splits[0] length",10,results.size()); assertEquals("splits[0][5]","six ",results.get(5)); results=readSplit(format,splits[1],job); assertEquals("splits[1] length",10,results.size()); assertEquals("splits[1][0]","ten ",results.get(0)); assertEquals("splits[1][1]","nine ",results.get(1)); }

Class: org.apache.hadoop.mapred.TestIFile

APIUtilityVerifier EqualityVerifier PublicFieldVerifier 
/**
 * Writes an empty gzip-compressed IFile, reopens it with {@link IFile.Reader},
 * and verifies that reading through the checksummed stream returns exactly the
 * trailing checksum bytes (an empty body leaves only the checksum to read).
 */
@Test
public void testIFileReaderWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem) localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  // Declared as the concrete codec actually constructed (was DefaultCodec,
  // which obscured that gzip is in use).
  GzipCodec codec = new GzipCodec();
  codec.setConf(conf);
  FSDataOutputStream out = rfs.create(path);
  // Parameterized types instead of raw IFile.Writer/Reader.
  IFile.Writer<Text, Text> writer =
      new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class, codec, null);
  writer.close();
  FSDataInputStream in = rfs.open(path);
  IFile.Reader<Text, Text> reader =
      new IFile.Reader<Text, Text>(conf, in, rfs.getFileStatus(path).getLen(), codec, null);
  reader.close();
  // NOTE(review): checksumIn is read after reader.close(); this relies on the
  // checksummed stream remaining readable after close — confirm intended.
  byte[] buf = new byte[100];
  int bytesRead = reader.checksumIn.readWithChecksum(buf, 0, buf.length);
  // JUnit convention: expected value first (was reversed in the original).
  assertEquals(reader.checksumIn.getChecksum().length, bytesRead);
}

Class: org.apache.hadoop.mapred.TestJobClient

InternalCallVerifier EqualityVerifier 
/**
 * With the local job runner configured, a detailed ClusterStatus must report
 * no active trackers, no blacklisted trackers, and no blacklist info entries.
 */
@Test
public void testGetClusterStatusWithLocalJobRunner() throws Exception {
  Configuration conf = new Configuration();
  conf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  JobClient client = new JobClient(conf);
  // true => request the detailed status, including tracker names.
  ClusterStatus status = client.getClusterStatus(true);
  Collection activeTrackers = status.getActiveTrackerNames();
  Assert.assertEquals(0, activeTrackers.size());
  int blacklistedCount = status.getBlacklistedTrackers();
  Assert.assertEquals(0, blacklistedCount);
  Collection blacklistInfo = status.getBlackListedTrackersInfo();
  Assert.assertEquals(0, blacklistInfo.size());
}

Class: org.apache.hadoop.mapred.TestJobConf

InternalCallVerifier EqualityVerifier 
// Checks bidirectional aliasing of map/reduce memory settings: writing either the MR1
// (mapred.*) or MR2 (mapreduce.*) property names is visible through the typed getters,
// and the typed setters are visible through BOTH property-name generations.
/** * Ensure that M/R 1.x applications can get and set task virtual memory with * old property names */ @SuppressWarnings("deprecation") @Test(timeout=1000) public void testDeprecatedPropertyNameForTaskVmem(){ JobConf configuration=new JobConf(); configuration.setLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,1024); configuration.setLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,1024); Assert.assertEquals(1024,configuration.getMemoryForMapTask()); Assert.assertEquals(1024,configuration.getMemoryForReduceTask()); configuration.setLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY,1025); configuration.setLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY,1025); Assert.assertEquals(1025,configuration.getMemoryForMapTask()); Assert.assertEquals(1025,configuration.getMemoryForReduceTask()); configuration.setMemoryForMapTask(2048); configuration.setMemoryForReduceTask(2048); Assert.assertEquals(2048,configuration.getLong(JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY,-1)); Assert.assertEquals(2048,configuration.getLong(JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY,-1)); Assert.assertEquals(2048,configuration.getLong(JobConf.MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY,-1)); Assert.assertEquals(2048,configuration.getLong(JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY,-1)); }

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// Exhaustive getter/setter walk over JobConf: defaults first, then set-and-read-back for
// jar unpack pattern, kept-task files, working dir, JVM reuse, key-field comparator,
// new-reducer flag, speculative execution (both map+reduce must be off before the combined
// flag reads false), session id, failure thresholds, priority, submit host, profiling
// ranges, debug scripts, queue name, and memory settings.
// NOTE(review): several assertEquals calls near the end pass the actual value first
// (e.g. getProfileTaskRange checks) — harmless at runtime but reversed vs. JUnit convention.
/** * test getters and setters of JobConf */ @SuppressWarnings("deprecation") @Test(timeout=5000) public void testJobConf(){ JobConf conf=new JobConf(); Pattern pattern=conf.getJarUnpackPattern(); assertEquals(Pattern.compile("(?:classes/|lib/).*").toString(),pattern.toString()); assertFalse(conf.getKeepFailedTaskFiles()); conf.setKeepFailedTaskFiles(true); assertTrue(conf.getKeepFailedTaskFiles()); assertNull(conf.getKeepTaskFilesPattern()); conf.setKeepTaskFilesPattern("123454"); assertEquals("123454",conf.getKeepTaskFilesPattern()); assertNotNull(conf.getWorkingDirectory()); conf.setWorkingDirectory(new Path("test")); assertTrue(conf.getWorkingDirectory().toString().endsWith("test")); assertEquals(1,conf.getNumTasksToExecutePerJvm()); assertNull(conf.getKeyFieldComparatorOption()); conf.setKeyFieldComparatorOptions("keySpec"); assertEquals("keySpec",conf.getKeyFieldComparatorOption()); assertFalse(conf.getUseNewReducer()); conf.setUseNewReducer(true); assertTrue(conf.getUseNewReducer()); assertTrue(conf.getMapSpeculativeExecution()); assertTrue(conf.getReduceSpeculativeExecution()); assertTrue(conf.getSpeculativeExecution()); conf.setReduceSpeculativeExecution(false); assertTrue(conf.getSpeculativeExecution()); conf.setMapSpeculativeExecution(false); assertFalse(conf.getSpeculativeExecution()); assertFalse(conf.getMapSpeculativeExecution()); assertFalse(conf.getReduceSpeculativeExecution()); conf.setSessionId("ses"); assertEquals("ses",conf.getSessionId()); assertEquals(3,conf.getMaxTaskFailuresPerTracker()); conf.setMaxTaskFailuresPerTracker(2); assertEquals(2,conf.getMaxTaskFailuresPerTracker()); assertEquals(0,conf.getMaxMapTaskFailuresPercent()); conf.setMaxMapTaskFailuresPercent(50); assertEquals(50,conf.getMaxMapTaskFailuresPercent()); assertEquals(0,conf.getMaxReduceTaskFailuresPercent()); conf.setMaxReduceTaskFailuresPercent(70); assertEquals(70,conf.getMaxReduceTaskFailuresPercent()); assertEquals(JobPriority.NORMAL.name(),conf.getJobPriority().name()); 
conf.setJobPriority(JobPriority.HIGH); assertEquals(JobPriority.HIGH.name(),conf.getJobPriority().name()); assertNull(conf.getJobSubmitHostName()); conf.setJobSubmitHostName("hostname"); assertEquals("hostname",conf.getJobSubmitHostName()); assertNull(conf.getJobSubmitHostAddress()); conf.setJobSubmitHostAddress("ww"); assertEquals("ww",conf.getJobSubmitHostAddress()); assertFalse(conf.getProfileEnabled()); conf.setProfileEnabled(true); assertTrue(conf.getProfileEnabled()); assertEquals(conf.getProfileTaskRange(true).toString(),"0-2"); assertEquals(conf.getProfileTaskRange(false).toString(),"0-2"); conf.setProfileTaskRange(true,"0-3"); assertEquals(conf.getProfileTaskRange(false).toString(),"0-2"); assertEquals(conf.getProfileTaskRange(true).toString(),"0-3"); assertNull(conf.getMapDebugScript()); conf.setMapDebugScript("mDbgScript"); assertEquals("mDbgScript",conf.getMapDebugScript()); assertNull(conf.getReduceDebugScript()); conf.setReduceDebugScript("rDbgScript"); assertEquals("rDbgScript",conf.getReduceDebugScript()); assertNull(conf.getJobLocalDir()); assertEquals("default",conf.getQueueName()); conf.setQueueName("qname"); assertEquals("qname",conf.getQueueName()); conf.setMemoryForMapTask(100 * 1000); assertEquals(100 * 1000,conf.getMemoryForMapTask()); conf.setMemoryForReduceTask(1000 * 1000); assertEquals(1000 * 1000,conf.getMemoryForReduceTask()); assertEquals(-1,conf.getMaxPhysicalMemoryForTask()); assertEquals("The variable key is no longer used.",JobConf.deprecatedString("key")); assertEquals("mapreduce.map.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS)); assertEquals("mapreduce.reduce.java.opts should not be set by default",null,conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS)); }

Class: org.apache.hadoop.mapred.TestJobInfo

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a JobInfo through Writable serialization (write to a byte array,
 * read back with readFields) and verifies job id, submit dir, and user survive.
 */
@Test(timeout=5000)
public void testJobInfo() throws IOException {
  JobID jid = new JobID("001", 1);
  Text user = new Text("User");
  Path path = new Path("/tmp/test");
  JobInfo original = new JobInfo(jid, user, path);
  // Serialize into an in-memory buffer.
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  original.write(new DataOutputStream(buffer));
  // Deserialize into a fresh instance.
  JobInfo roundTripped = new JobInfo();
  roundTripped.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(original.getJobID().toString(), roundTripped.getJobID().toString());
  assertEquals(original.getJobSubmitDir().getName(), roundTripped.getJobSubmitDir().getName());
  assertEquals(original.getUser().toString(), roundTripped.getUser().toString());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Downgrades a new-API {@link TaskID} to the old API and checks that both
 * task-id accessors of {@link org.apache.hadoop.mapred.TaskReport} render the
 * canonical string form.
 */
@Test(timeout=5000)
public void testTaskID() throws IOException, InterruptedException {
  JobID jobid = new JobID("1014873536921", 6);
  TaskID tid = new TaskID(jobid, TaskType.MAP, 0);
  org.apache.hadoop.mapred.TaskID tid1 = org.apache.hadoop.mapred.TaskID.downgrade(tid);
  org.apache.hadoop.mapred.TaskReport treport =
      new org.apache.hadoop.mapred.TaskReport(tid1, 0.0f, State.FAILED.toString(), null,
          TIPStatus.FAILED, 100, 100, new org.apache.hadoop.mapred.Counters());
  // JUnit convention: expected value first (original had the arguments reversed,
  // which produces confusing failure messages).
  Assert.assertEquals("task_1014873536921_0006_m_000000", treport.getTaskId());
  Assert.assertEquals("task_1014873536921_0006_m_000000", treport.getTaskID().toString());
}

Class: org.apache.hadoop.mapred.TestLineRecordReaderJobs

InternalCallVerifier EqualityVerifier 
// Sets textinputformat.record.delimiter to "\t\n" with a single mapper, runs the helper
// job, and compares the whole output file against the expected offsets+records produced
// when records are split only at tab-newline boundaries.
/** * Test the case when a custom record delimiter is specified using the * textinputformat.record.delimiter configuration property * @throws IOException * @throws InterruptedException * @throws ClassNotFoundException */ @Test public void testCustomRecordDelimiters() throws IOException, InterruptedException, ClassNotFoundException { Configuration conf=new Configuration(); conf.set("textinputformat.record.delimiter","\t\n"); conf.setInt("mapreduce.job.maps",1); FileSystem localFs=FileSystem.getLocal(conf); localFs.delete(workDir,true); createInputFile(conf); createAndRunJob(conf); String expected="0\tabc\ndef\n9\tghi\njkl\n"; assertEquals(expected,readOutputFile(conf)); }

EqualityVerifier 
/**
 * Default-delimiter counterpart of testCustomRecordDelimiters: with no
 * textinputformat.record.delimiter set, records break on newlines, so the
 * output contains one offset-prefixed record per input line.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test
public void testDefaultRecordDelimiters()
    throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  // Start from a clean working directory before generating input.
  localFs.delete(workDir, true);
  createInputFile(conf);
  createAndRunJob(conf);
  String expectedOutput = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
  assertEquals(expectedOutput, readOutputFile(conf));
}

Class: org.apache.hadoop.mapred.TestLocalContainerLauncher

InternalCallVerifier EqualityVerifier 
/**
 * Creates a map output file and its index file under two DIFFERENT local
 * dirs (asserting their parents differ), then invokes
 * LocalContainerLauncher.renameMapOutputForReduce(), which must cope with
 * the output and index living in separate directories.
 * NOTE(review): the rename call is not followed by an assertion here -- the
 * test passes as long as renameMapOutputForReduce() does not throw.
 */
@Test public void testRenameMapOutputForReduce() throws Exception { final JobConf conf=new JobConf(); final MROutputFiles mrOutputFiles=new MROutputFiles(); mrOutputFiles.setConf(conf); conf.set(MRConfig.LOCAL_DIR,localDirs[0].toString()); final Path mapOut=mrOutputFiles.getOutputFileForWrite(1); conf.set(MRConfig.LOCAL_DIR,localDirs[1].toString()); final Path mapOutIdx=mrOutputFiles.getOutputIndexFileForWrite(1); Assert.assertNotEquals("Paths must be different!",mapOut.getParent(),mapOutIdx.getParent()); conf.setStrings(MRConfig.LOCAL_DIR,localDirs); final FileContext lfc=FileContext.getLocalFSFileContext(conf); lfc.create(mapOut,EnumSet.of(CREATE)).close(); lfc.create(mapOutIdx,EnumSet.of(CREATE)).close(); final JobId jobId=MRBuilderUtils.newJobId(12345L,1,2); final TaskId tid=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP); final TaskAttemptId taid=MRBuilderUtils.newTaskAttemptId(tid,0); LocalContainerLauncher.renameMapOutputForReduce(conf,taid,mrOutputFiles); }

Class: org.apache.hadoop.mapred.TestLocalModeWithNewApis

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * End-to-end word count with the new (org.apache.hadoop.mapreduce) APIs in
 * local mode: writes one input file, runs the job, and verifies the
 * aggregated counts read back from the output directory.
 */
@Test
public void testNewApis() throws Exception {
  Random r = new Random(System.currentTimeMillis());
  Path tmpBaseDir = new Path("/tmp/wc-" + r.nextInt());
  final Path inDir = new Path(tmpBaseDir, "input");
  final Path outDir = new Path(tmpBaseDir, "output");
  String input = "The quick brown fox\nhas many silly\nred fox sox\n";
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // Write the job input; close even if writeBytes throws.
  DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
  try {
    file.writeBytes(input);
  } finally {
    file.close();
  }
  Job job = Job.getInstance(conf, "word count");
  job.setJarByClass(TestLocalModeWithNewApis.class);
  job.setMapperClass(TokenizerMapper.class);
  job.setCombinerClass(IntSumReducer.class);
  job.setReducerClass(IntSumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(IntWritable.class);
  FileInputFormat.addInputPath(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);
  // Expected value first (the original had the arguments reversed).
  assertEquals(true, job.waitForCompletion(true));
  String output = readOutput(outDir, conf);
  assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n"
      + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", output);
  outFs.delete(tmpBaseDir, true);
}

Class: org.apache.hadoop.mapred.TestMRWithDistributedCache

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the deprecated DistributedCache helpers end to end: add/set of
 * local archives and local files (including comma-joined accumulation on
 * repeated add and replacement on set), archive/file timestamps, the no-op
 * createAllSymlink() (CACHE_SYMLINK stays null while getSymlink() is true),
 * getFileStatus()/getTimestamp() agreement for a real file, and cache
 * archive/file URI registration.
 * NOTE(review): timeout=1000 ms is tight given the file creation below --
 * confirm it does not flake on slow disks.
 */
@Test(timeout=1000) public void testDeprecatedFunctions() throws Exception { DistributedCache.addLocalArchives(conf,"Test Local Archives 1"); Assert.assertEquals("Test Local Archives 1",conf.get(DistributedCache.CACHE_LOCALARCHIVES)); Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length); Assert.assertEquals("Test Local Archives 1",DistributedCache.getLocalCacheArchives(conf)[0].getName()); DistributedCache.addLocalArchives(conf,"Test Local Archives 2"); Assert.assertEquals("Test Local Archives 1,Test Local Archives 2",conf.get(DistributedCache.CACHE_LOCALARCHIVES)); Assert.assertEquals(2,DistributedCache.getLocalCacheArchives(conf).length); Assert.assertEquals("Test Local Archives 2",DistributedCache.getLocalCacheArchives(conf)[1].getName()); DistributedCache.setLocalArchives(conf,"Test Local Archives 3"); Assert.assertEquals("Test Local Archives 3",conf.get(DistributedCache.CACHE_LOCALARCHIVES)); Assert.assertEquals(1,DistributedCache.getLocalCacheArchives(conf).length); Assert.assertEquals("Test Local Archives 3",DistributedCache.getLocalCacheArchives(conf)[0].getName()); DistributedCache.addLocalFiles(conf,"Test Local Files 1"); Assert.assertEquals("Test Local Files 1",conf.get(DistributedCache.CACHE_LOCALFILES)); Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length); Assert.assertEquals("Test Local Files 1",DistributedCache.getLocalCacheFiles(conf)[0].getName()); DistributedCache.addLocalFiles(conf,"Test Local Files 2"); Assert.assertEquals("Test Local Files 1,Test Local Files 2",conf.get(DistributedCache.CACHE_LOCALFILES)); Assert.assertEquals(2,DistributedCache.getLocalCacheFiles(conf).length); Assert.assertEquals("Test Local Files 2",DistributedCache.getLocalCacheFiles(conf)[1].getName()); DistributedCache.setLocalFiles(conf,"Test Local Files 3"); Assert.assertEquals("Test Local Files 3",conf.get(DistributedCache.CACHE_LOCALFILES)); Assert.assertEquals(1,DistributedCache.getLocalCacheFiles(conf).length); 
Assert.assertEquals("Test Local Files 3",DistributedCache.getLocalCacheFiles(conf)[0].getName()); DistributedCache.setArchiveTimestamps(conf,"1234567890"); Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_ARCHIVES_TIMESTAMPS,0)); Assert.assertEquals(1,DistributedCache.getArchiveTimestamps(conf).length); Assert.assertEquals(1234567890,DistributedCache.getArchiveTimestamps(conf)[0]); DistributedCache.setFileTimestamps(conf,"1234567890"); Assert.assertEquals(1234567890,conf.getLong(DistributedCache.CACHE_FILES_TIMESTAMPS,0)); Assert.assertEquals(1,DistributedCache.getFileTimestamps(conf).length); Assert.assertEquals(1234567890,DistributedCache.getFileTimestamps(conf)[0]); DistributedCache.createAllSymlink(conf,new File("Test Job Cache Dir"),new File("Test Work Dir")); Assert.assertNull(conf.get(DistributedCache.CACHE_SYMLINK)); Assert.assertTrue(DistributedCache.getSymlink(conf)); Assert.assertTrue(symlinkFile.createNewFile()); FileStatus fileStatus=DistributedCache.getFileStatus(conf,symlinkFile.toURI()); Assert.assertNotNull(fileStatus); Assert.assertEquals(fileStatus.getModificationTime(),DistributedCache.getTimestamp(conf,symlinkFile.toURI())); Assert.assertTrue(symlinkFile.delete()); DistributedCache.addCacheArchive(symlinkFile.toURI(),conf); Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_ARCHIVES)); Assert.assertEquals(1,DistributedCache.getCacheArchives(conf).length); Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheArchives(conf)[0]); DistributedCache.addCacheFile(symlinkFile.toURI(),conf); Assert.assertEquals(symlinkFile.toURI().toString(),conf.get(DistributedCache.CACHE_FILES)); Assert.assertEquals(1,DistributedCache.getCacheFiles(conf).length); Assert.assertEquals(symlinkFile.toURI(),DistributedCache.getCacheFiles(conf)[0]); }

Class: org.apache.hadoop.mapred.TestMaster

APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies Master.getMasterAddress() resolution for the default, classic,
 * and YARN framework configurations, including rejection of a malformed
 * classic master address.
 */
@Test
public void testGetMasterAddress() {
  YarnConfiguration conf = new YarnConfiguration();
  // Default configuration: resolves to the default RM address host.
  String masterHostname = Master.getMasterAddress(conf).getHostName();
  InetSocketAddress rmAddr =
      NetUtils.createSocketAddr(YarnConfiguration.DEFAULT_RM_ADDRESS);
  // Expected value first (the original reversed expected/actual).
  assertEquals(rmAddr.getHostName(), masterHostname);
  // Classic framework with a malformed master address must fail.
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  conf.set(MRConfig.MASTER_ADDRESS, "local:invalid");
  try {
    Master.getMasterAddress(conf);
    fail("Should not reach here as there is a bad master address");
  } catch (Exception e) {
    // expected: the malformed address cannot be resolved
  }
  // Classic framework with a valid master address.
  conf.set(MRConfig.MASTER_ADDRESS, "bar.com:8042");
  masterHostname = Master.getMasterAddress(conf).getHostName();
  assertEquals("bar.com", masterHostname);
  // YARN framework reads the RM address instead.
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_ADDRESS, "foo1.com:8192");
  masterHostname = Master.getMasterAddress(conf).getHostName();
  assertEquals("foo1.com", masterHostname);
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies Master.getMasterUserName(): with YARN (the default and explicit
 * case) it returns the RM principal; with the classic framework it returns
 * the configured MR master user.
 */
@Test
public void testGetMasterUser() {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(MRConfig.MASTER_USER_NAME, "foo");
  conf.set(YarnConfiguration.RM_PRINCIPAL, "bar");
  // Expected value first (the original reversed expected/actual).
  assertEquals("bar", Master.getMasterUserName(conf));
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
  assertEquals("foo", Master.getMasterUserName(conf));
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  assertEquals("bar", Master.getMasterUserName(conf));
}

Class: org.apache.hadoop.mapred.TestMiniMRClasspath

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Launches a job via launchExternal() against freshly started mini DFS and
 * MR clusters and checks the aggregated, tab-separated counts; both
 * clusters are shut down in the finally block.
 */
@Test public void testExternalWritable() throws IOException { String namenode=null; MiniDFSCluster dfs=null; MiniMRCluster mr=null; FileSystem fileSys=null; try { final int taskTrackers=4; Configuration conf=new Configuration(); dfs=new MiniDFSCluster.Builder(conf).build(); fileSys=dfs.getFileSystem(); namenode=fileSys.getUri().toString(); mr=new MiniMRCluster(taskTrackers,namenode,3); JobConf jobConf=mr.createJobConf(); String result; result=launchExternal(fileSys.getUri(),jobConf,"Dennis was here!\nDennis again!",3,1); Assert.assertEquals("Dennis again!\t1\nDennis was here!\t1\n",result); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Runs a word-count job via launchWordCount() against freshly started mini
 * DFS and MR clusters and verifies the aggregated counts; both clusters are
 * shut down in the finally block.
 *
 * @throws IOException
 */
@Test
public void testClassPath() throws IOException {
  String namenode = null;
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    // (Removed an unused local: jobTrackerPort was declared but never read.)
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster.Builder(conf).build();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    mr = new MiniMRCluster(taskTrackers, namenode, 3);
    JobConf jobConf = mr.createJobConf();
    String result;
    result = launchWordCount(fileSys.getUri(), jobConf,
        "The quick brown fox\nhas many silly\n" + "red fox sox\n", 3, 1);
    Assert.assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n"
        + "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result);
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
    if (mr != null) {
      mr.shutdown();
    }
  }
}

Class: org.apache.hadoop.mapred.TestMiniMRClientCluster

InternalCallVerifier EqualityVerifier 
/**
 * Restarts the mini cluster and asserts that every published service
 * address (RM, RM admin/scheduler/resource-tracker/webapp, job history
 * server and its webapp) is unchanged across the restart.
 */
@Test
public void testRestart() throws Exception {
  final String[] addressKeys = {
      YarnConfiguration.RM_ADDRESS,
      YarnConfiguration.RM_ADMIN_ADDRESS,
      YarnConfiguration.RM_SCHEDULER_ADDRESS,
      YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.RM_WEBAPP_ADDRESS,
      JHAdminConfig.MR_HISTORY_ADDRESS,
      JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS };
  // Snapshot all addresses before the restart.
  final String[] before = new String[addressKeys.length];
  for (int i = 0; i < addressKeys.length; i++) {
    before[i] = mrCluster.getConfig().get(addressKeys[i]);
  }
  mrCluster.restart();
  // Every address must survive the restart unchanged.
  for (int i = 0; i < addressKeys.length; i++) {
    final String after = mrCluster.getConfig().get(addressKeys[i]);
    assertEquals("Address before restart: " + before[i]
        + " is different from new address: " + after, before[i], after);
  }
}

Class: org.apache.hadoop.mapred.TestNetworkedJob

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * test JobConf * @throws Exception */ @SuppressWarnings("deprecation") @Test(timeout=500000) public void testNetworkedJob() throws Exception { MiniMRClientCluster mr=null; FileSystem fileSys=null; try { mr=createMiniClusterWithCapacityScheduler(); JobConf job=new JobConf(mr.getConfig()); fileSys=FileSystem.get(job); fileSys.delete(testDir,true); FSDataOutputStream out=fileSys.create(inFile,true); out.writeBytes("This is a test file"); out.close(); FileInputFormat.setInputPaths(job,inFile); FileOutputFormat.setOutputPath(job,outDir); job.setInputFormat(TextInputFormat.class); job.setOutputFormat(TextOutputFormat.class); job.setMapperClass(IdentityMapper.class); job.setReducerClass(IdentityReducer.class); job.setNumReduceTasks(0); JobClient client=new JobClient(mr.getConfig()); RunningJob rj=client.submitJob(job); JobID jobId=rj.getID(); NetworkedJob runningJob=(NetworkedJob)client.getJob(jobId); runningJob.setJobPriority(JobPriority.HIGH.name()); assertTrue(runningJob.getConfiguration().toString().endsWith("0001/job.xml")); assertEquals(runningJob.getID(),jobId); assertEquals(runningJob.getJobID(),jobId.toString()); assertEquals(runningJob.getJobName(),"N/A"); assertTrue(runningJob.getJobFile().endsWith(".staging/" + runningJob.getJobID() + "/job.xml")); assertTrue(runningJob.getTrackingURL().length() > 0); assertTrue(runningJob.mapProgress() == 0.0f); assertTrue(runningJob.reduceProgress() == 0.0f); assertTrue(runningJob.cleanupProgress() == 0.0f); assertTrue(runningJob.setupProgress() == 0.0f); TaskCompletionEvent[] tce=runningJob.getTaskCompletionEvents(0); assertEquals(tce.length,0); assertEquals(runningJob.getHistoryUrl(),""); assertFalse(runningJob.isRetired()); assertEquals(runningJob.getFailureInfo(),""); assertEquals(runningJob.getJobStatus().getJobName(),"N/A"); assertEquals(client.getMapTaskReports(jobId).length,0); try { client.getSetupTaskReports(jobId); } catch ( YarnRuntimeException e) { assertEquals(e.getMessage(),"Unrecognized task type: 
JOB_SETUP"); } try { client.getCleanupTaskReports(jobId); } catch ( YarnRuntimeException e) { assertEquals(e.getMessage(),"Unrecognized task type: JOB_CLEANUP"); } assertEquals(client.getReduceTaskReports(jobId).length,0); ClusterStatus status=client.getClusterStatus(true); assertEquals(status.getActiveTrackerNames().size(),2); assertEquals(status.getBlacklistedTrackers(),0); assertEquals(status.getBlacklistedTrackerNames().size(),0); assertEquals(status.getBlackListedTrackersInfo().size(),0); assertEquals(status.getJobTrackerStatus(),JobTrackerStatus.RUNNING); assertEquals(status.getMapTasks(),1); assertEquals(status.getMaxMapTasks(),20); assertEquals(status.getMaxReduceTasks(),4); assertEquals(status.getNumExcludedNodes(),0); assertEquals(status.getReduceTasks(),1); assertEquals(status.getTaskTrackers(),2); assertEquals(status.getTTExpiryInterval(),0); assertEquals(status.getJobTrackerStatus(),JobTrackerStatus.RUNNING); assertEquals(status.getGraylistedTrackers(),0); ByteArrayOutputStream dataOut=new ByteArrayOutputStream(); status.write(new DataOutputStream(dataOut)); ClusterStatus status2=new ClusterStatus(); status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut.toByteArray()))); assertEquals(status.getActiveTrackerNames(),status2.getActiveTrackerNames()); assertEquals(status.getBlackListedTrackersInfo(),status2.getBlackListedTrackersInfo()); assertEquals(status.getMapTasks(),status2.getMapTasks()); try { } catch ( RuntimeException e) { assertTrue(e.getMessage().endsWith("not found on CLASSPATH")); } JobClient.setTaskOutputFilter(job,TaskStatusFilter.ALL); assertEquals(JobClient.getTaskOutputFilter(job),TaskStatusFilter.ALL); assertEquals(client.getDefaultMaps(),20); assertEquals(client.getDefaultReduces(),4); assertEquals(client.getSystemDir().getName(),"jobSubmitDir"); JobQueueInfo[] rootQueueInfo=client.getRootQueues(); assertEquals(rootQueueInfo.length,1); assertEquals(rootQueueInfo[0].getQueueName(),"default"); JobQueueInfo[] 
qinfo=client.getQueues(); assertEquals(qinfo.length,1); assertEquals(qinfo[0].getQueueName(),"default"); assertEquals(client.getChildQueues("default").length,0); assertEquals(client.getJobsFromQueue("default").length,1); assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith("/job.xml")); JobQueueInfo qi=client.getQueueInfo("default"); assertEquals(qi.getQueueName(),"default"); assertEquals(qi.getQueueState(),"running"); QueueAclsInfo[] aai=client.getQueueAclsForCurrentUser(); assertEquals(aai.length,2); assertEquals(aai[0].getQueueName(),"root"); assertEquals(aai[1].getQueueName(),"default"); Token token=client.getDelegationToken(new Text(UserGroupInformation.getCurrentUser().getShortUserName())); assertEquals(token.getKind().toString(),"RM_DELEGATION_TOKEN"); assertEquals("Expected matching JobIDs",jobId,client.getJob(jobId).getJobStatus().getJobID()); assertEquals("Expected matching startTimes",rj.getJobStatus().getStartTime(),client.getJob(jobId).getJobStatus().getStartTime()); } finally { if (fileSys != null) { fileSys.delete(testDir,true); } if (mr != null) { mr.stop(); } } }

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a BlackListInfo through Writable serialization and verifies
 * that the deserialized copy matches the original.
 *
 * @throws IOException
 */
@Test(timeout = 5000)
public void testBlackListInfo() throws IOException {
  BlackListInfo info = new BlackListInfo();
  info.setBlackListReport("blackListInfo");
  info.setReasonForBlackListing("reasonForBlackListing");
  info.setTrackerName("trackerName");
  ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
  DataOutput out = new DataOutputStream(byteOut);
  info.write(out);
  BlackListInfo info2 = new BlackListInfo();
  info2.readFields(new DataInputStream(
      new ByteArrayInputStream(byteOut.toByteArray())));
  // Compare the deserialized copy against the original. The previous code
  // asserted assertEquals(info, info) / info.toString() vs itself, which
  // could never fail and left the round trip unverified.
  assertEquals(info, info2);
  assertEquals(info.toString(), info2.toString());
  assertEquals(info2.getTrackerName(), "trackerName");
  assertEquals(info2.getReasonForBlackListing(), "reasonForBlackListing");
  assertEquals(info2.getBlackListReport(), "blackListInfo");
}

InternalCallVerifier EqualityVerifier 
/**
 * Submits a map-only identity job to the mini cluster and checks that the
 * JobStatus fetched through JobClient.getJob() agrees with the submitted
 * RunningJob on both the job id and the start time; input/output cleanup
 * and cluster shutdown happen in the finally block.
 */
@Test(timeout=500000) public void testGetJobStatus() throws IOException, InterruptedException, ClassNotFoundException { MiniMRClientCluster mr=null; FileSystem fileSys=null; try { mr=createMiniClusterWithCapacityScheduler(); JobConf job=new JobConf(mr.getConfig()); fileSys=FileSystem.get(job); fileSys.delete(testDir,true); FSDataOutputStream out=fileSys.create(inFile,true); out.writeBytes("This is a test file"); out.close(); FileInputFormat.setInputPaths(job,inFile); FileOutputFormat.setOutputPath(job,outDir); job.setInputFormat(TextInputFormat.class); job.setOutputFormat(TextOutputFormat.class); job.setMapperClass(IdentityMapper.class); job.setReducerClass(IdentityReducer.class); job.setNumReduceTasks(0); JobClient client=new JobClient(mr.getConfig()); RunningJob rj=client.submitJob(job); JobID jobId=rj.getID(); assertEquals("Expected matching JobIDs",jobId,client.getJob(jobId).getJobStatus().getJobID()); assertEquals("Expected matching startTimes",rj.getJobStatus().getStartTime(),client.getJob(jobId).getJobStatus().getStartTime()); } finally { if (fileSys != null) { fileSys.delete(testDir,true); } if (mr != null) { mr.stop(); } } }

Class: org.apache.hadoop.mapred.TestOldCombinerGrouping

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Runs a local job with a combiner and a combiner key-grouping comparator,
 * then checks that the combiner actually ran (input records > output
 * records) and that the grouped output values are correct.
 *
 * @throws Exception
 */
@Test
public void testCombiner() throws Exception {
  if (!new File(TEST_ROOT_DIR).mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
  }
  File in = new File(TEST_ROOT_DIR, "input");
  if (!in.mkdirs()) {
    throw new RuntimeException("Could not create test dir: " + in);
  }
  File out = new File(TEST_ROOT_DIR, "output");
  // Two key groups: A (values 1,2) and B (values 3,4,5).
  PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
  pw.println("A|a,1");
  pw.println("A|b,2");
  pw.println("B|a,3");
  pw.println("B|b,4");
  pw.println("B|c,5");
  pw.close();
  JobConf job = new JobConf();
  job.set("mapreduce.framework.name", "local");
  TextInputFormat.setInputPaths(job, new Path(in.getPath()));
  TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
  job.setMapperClass(Map.class);
  job.setReducerClass(Reduce.class);
  job.setInputFormat(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(LongWritable.class);
  job.setOutputFormat(TextOutputFormat.class);
  job.setOutputValueGroupingComparator(GroupComparator.class);
  job.setCombinerClass(Combiner.class);
  job.setCombinerKeyGroupingComparator(GroupComparator.class);
  // Force combining even with few spills.
  job.setInt("min.num.spills.for.combine", 0);
  JobClient client = new JobClient(job);
  RunningJob runningJob = client.submitJob(job);
  runningJob.waitForCompletion();
  if (runningJob.isSuccessful()) {
    Counters counters = runningJob.getCounters();
    long combinerInputRecords = counters.getGroup(
        "org.apache.hadoop.mapreduce.TaskCounter")
        .getCounter("COMBINE_INPUT_RECORDS");
    long combinerOutputRecords = counters.getGroup(
        "org.apache.hadoop.mapreduce.TaskCounter")
        .getCounter("COMBINE_OUTPUT_RECORDS");
    Assert.assertTrue(combinerInputRecords > 0);
    Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
    // Exactly two output lines are expected; parameterized collections
    // replace the raw Set/HashSet, and the reader is closed in finally.
    BufferedReader br = new BufferedReader(
        new FileReader(new File(out, "part-00000")));
    Set<String> output = new HashSet<String>();
    try {
      String line = br.readLine();
      Assert.assertNotNull(line);
      output.add(line.substring(0, 1) + line.substring(4, 5));
      line = br.readLine();
      Assert.assertNotNull(line);
      output.add(line.substring(0, 1) + line.substring(4, 5));
      line = br.readLine();
      Assert.assertNull(line);
    } finally {
      br.close();
    }
    Set<String> expected = new HashSet<String>();
    expected.add("A2");
    expected.add("B5");
    Assert.assertEquals(expected, output);
  } else {
    Assert.fail("Job failed");
  }
}

Class: org.apache.hadoop.mapred.TestOldMethodsJobID

InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Checks the behavior of the no-op {@code Reporter.NULL} singleton:
 * counters are null, the input split is unavailable (throws
 * UnsupportedOperationException), and progress is zero.
 */
@Test(timeout = 5000)
public void testReporter() {
  final Reporter nil = Reporter.NULL;
  assertNull(nil.getCounter(null));
  assertNull(nil.getCounter("group", "name"));
  try {
    assertNull(nil.getInputSplit());
  } catch (UnsupportedOperationException e) {
    assertEquals("NULL reporter has no input", e.getMessage());
  }
  assertEquals(0, nil.getProgress(), 0.01);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises the deprecated setters/getters of TaskCompletionEvent on an
 * instance downgraded from the new API.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testTaskCompletionEvent() {
  final TaskAttemptID attemptId = new TaskAttemptID("001", 1, TaskType.REDUCE, 2, 3);
  final TaskCompletionEvent source =
      new TaskCompletionEvent(12, attemptId, 13, true, Status.SUCCEEDED, "httptracker");
  final TaskCompletionEvent event = TaskCompletionEvent.downgrade(source);
  event.setTaskAttemptId(attemptId);
  event.setTaskTrackerHttp("httpTracker");
  // Leading zero in the attempt number is normalized away on parse.
  event.setTaskId("attempt_001_0001_m_000002_04");
  assertEquals("attempt_001_0001_m_000002_4", event.getTaskId());
  event.setTaskStatus(Status.OBSOLETE);
  assertEquals(Status.OBSOLETE.toString(), event.getStatus().toString());
  event.setTaskRunTime(20);
  assertEquals(event.getTaskRunTime(), 20);
  event.setEventId(16);
  assertEquals(event.getEventId(), 16);
}

InternalCallVerifier EqualityVerifier 
/**
 * Tests the deprecated JobProfile accessors and its Writable round trip.
 *
 * @throws IOException
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testJobProfile() throws IOException {
  final JobProfile original = new JobProfile("user", "job_001_03", "jobFile", "uri", "name");
  // The job id is normalized and the queue defaults to "default".
  assertEquals("job_001_0003", original.getJobId());
  assertEquals("default", original.getQueueName());
  // Serialize, then read back into a fresh instance and compare fields.
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  original.write(new DataOutputStream(buffer));
  final JobProfile restored = new JobProfile();
  restored.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(restored.name, original.name);
  assertEquals(restored.jobFile, original.jobFile);
  assertEquals(restored.queueName, original.queueName);
  assertEquals(restored.url, original.url);
  assertEquals(restored.user, original.user);
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies the Writable round trip of a JobID and the deprecated
 * getJobIDsPattern() helper.
 *
 * @throws IOException
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testJobID() throws IOException {
  final JobID original = new JobID("001", 2);
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  original.write(new DataOutputStream(buffer));
  final JobID restored =
      JobID.read(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(original, restored);
  assertEquals("job_001_0001", JobID.getJobIDsPattern("001", 1));
}

EqualityVerifier 
/**
 * Tests the deprecated TaskAttemptID constructor (boolean isMap) and the
 * deprecated pattern helper methods.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testTaskAttemptID() {
  final TaskAttemptID attempt = new TaskAttemptID("001", 2, true, 3, 4);
  assertEquals("attempt_001_0002_m_000003_4",
      TaskAttemptID.getTaskAttemptIDsPattern("001", 2, true, 3, 4));
  assertEquals("task_001_0002_m_000003", attempt.getTaskID().toString());
  assertEquals("attempt_001_0001_r_000002_3",
      TaskAttemptID.getTaskAttemptIDsPattern("001", 1, TaskType.REDUCE, 2, 3));
  assertEquals("001_0001_m_000001_2",
      TaskAttemptID.getTaskAttemptIDsPatternWOPrefix("001", 1, TaskType.MAP, 1, 2).toString());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Tests the deprecated boolean-isMap TaskID constructors, the Writable
 * round trip, and the deprecated pattern helpers.
 * (The method name's existing spelling is kept unchanged.)
 *
 * @throws IOException
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testDepricatedMethods() throws IOException {
  final JobID jobId = new JobID();
  TaskID taskId = new TaskID(jobId, true, 1);
  assertEquals(taskId.getTaskType(), TaskType.MAP);
  taskId = new TaskID(jobId, false, 1);
  assertEquals(taskId.getTaskType(), TaskType.REDUCE);
  taskId = new TaskID("001", 1, false, 1);
  assertEquals(taskId.getTaskType(), TaskType.REDUCE);
  taskId = new TaskID("001", 1, true, 1);
  assertEquals(taskId.getTaskType(), TaskType.MAP);
  // Writable round trip preserves the rendered id.
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  taskId.write(new DataOutputStream(buffer));
  final TaskID restored =
      TaskID.read(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(restored.toString(), taskId.toString());
  assertEquals("task_001_0001_m_000002", TaskID.getTaskIDsPattern("001", 1, true, 2));
  assertEquals("task_003_0001_m_000004", TaskID.getTaskIDsPattern("003", 1, TaskType.MAP, 4));
  assertEquals("003_0001_m_000004",
      TaskID.getTaskIDsPatternWOPrefix("003", 1, TaskType.MAP, 4).toString());
}

Class: org.apache.hadoop.mapred.TestQueue

EqualityVerifier 
/**
 * With the default configuration the queue hierarchy root must contain
 * exactly two child queues.
 */
@Test(timeout = 5000)
public void testDefaultConfig() {
  QueueManager manager = new QueueManager(true);
  // Expected value first (the original reversed expected/actual).
  assertEquals(2, manager.getRoot().getChildren().size());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
// NOTE(review): this method checks the full QueueManager lifecycle from a
// config file: hierarchy and ACL rendering, queue states, access checks via
// a mocked UserGroupInformation, refreshQueues(), JobQueueInfo consistency,
// and the JSON produced by dumpConfiguration(). The expected JSON strings
// are position-sensitive; do not reformat them.
/** * test QueueManager * configuration from file * @throws IOException */ @Test(timeout=5000) public void testQueue() throws IOException { File f=null; try { f=writeFile(); QueueManager manager=new QueueManager(f.getCanonicalPath(),true); manager.setSchedulerInfo("first","queueInfo"); manager.setSchedulerInfo("second","queueInfoqueueInfo"); Queue root=manager.getRoot(); assertTrue(root.getChildren().size() == 2); Iterator iterator=root.getChildren().iterator(); Queue firstSubQueue=iterator.next(); assertTrue(firstSubQueue.getName().equals("first")); assertEquals(firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),"Users [user1, user2] and members of the groups [group1, group2] are allowed"); Queue secondSubQueue=iterator.next(); assertTrue(secondSubQueue.getName().equals("second")); assertEquals(secondSubQueue.getProperties().getProperty("key"),"value"); assertEquals(secondSubQueue.getProperties().getProperty("key1"),"value1"); assertEquals(firstSubQueue.getState().getStateName(),"running"); assertEquals(secondSubQueue.getState().getStateName(),"stopped"); Set template=new HashSet(); template.add("first"); template.add("second"); assertEquals(manager.getLeafQueueNames(),template); UserGroupInformation mockUGI=mock(UserGroupInformation.class); when(mockUGI.getShortUserName()).thenReturn("user1"); String[] groups={"group1"}; when(mockUGI.getGroupNames()).thenReturn(groups); assertTrue(manager.hasAccess("first",QueueACL.SUBMIT_JOB,mockUGI)); assertFalse(manager.hasAccess("second",QueueACL.SUBMIT_JOB,mockUGI)); assertFalse(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI)); when(mockUGI.getShortUserName()).thenReturn("user3"); assertTrue(manager.hasAccess("first",QueueACL.ADMINISTER_JOBS,mockUGI)); QueueAclsInfo[] qai=manager.getQueueAcls(mockUGI); assertEquals(qai.length,1); manager.refreshQueues(getConfiguration(),null); iterator=root.getChildren().iterator(); Queue firstSubQueue1=iterator.next(); Queue 
secondSubQueue1=iterator.next(); assertTrue(firstSubQueue.equals(firstSubQueue1)); assertEquals(firstSubQueue1.getState().getStateName(),"running"); assertEquals(secondSubQueue1.getState().getStateName(),"stopped"); assertEquals(firstSubQueue1.getSchedulingInfo(),"queueInfo"); assertEquals(secondSubQueue1.getSchedulingInfo(),"queueInfoqueueInfo"); assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(),"first"); assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(),"running"); assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(),"queueInfo"); assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(),0); assertEquals(manager.getSchedulerInfo("first"),"queueInfo"); Set queueJobQueueInfos=new HashSet(); for ( JobQueueInfo jobInfo : manager.getJobQueueInfos()) { queueJobQueueInfos.add(jobInfo.getQueueName()); } Set rootJobQueueInfos=new HashSet(); for ( Queue queue : root.getChildren()) { rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName()); } assertEquals(queueJobQueueInfos,rootJobQueueInfos); assertEquals(manager.getJobQueueInfoMapping().get("first").getQueueName(),"first"); Writer writer=new StringWriter(); Configuration conf=getConfiguration(); conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY); QueueManager.dumpConfiguration(writer,f.getAbsolutePath(),conf); String result=writer.toString(); assertTrue(result.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0); writer=new StringWriter(); QueueManager.dumpConfiguration(writer,conf); result=writer.toString(); assertEquals("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" 
\",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",result); QueueAclsInfo qi=new QueueAclsInfo(); assertNull(qi.getQueueName()); } finally { if (f != null) { f.delete(); } } }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Tests QueueManager built from an in-memory configuration (no config
 * file): hierarchy, ACL rendering, queue states, isRunning(), scheduler
 * info, and leaf queue names.
 *
 * @throws IOException
 */
@Test(timeout = 5000)
public void test2Queue() throws IOException {
  Configuration conf = getConfiguration();
  QueueManager manager = new QueueManager(conf);
  manager.setSchedulerInfo("first", "queueInfo");
  manager.setSchedulerInfo("second", "queueInfoqueueInfo");
  Queue root = manager.getRoot();
  assertTrue(root.getChildren().size() == 2);
  // Children iterate in declaration order: "first" then "second".
  // Parameterized types restore the generics missing from the original.
  Iterator<Queue> iterator = root.getChildren().iterator();
  Queue firstSubQueue = iterator.next();
  assertTrue(firstSubQueue.getName().equals("first"));
  assertEquals(
      firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job").toString(),
      "Users [user1, user2] and members of the groups [group1, group2] are allowed");
  Queue secondSubQueue = iterator.next();
  assertTrue(secondSubQueue.getName().equals("second"));
  assertEquals(firstSubQueue.getState().getStateName(), "running");
  assertEquals(secondSubQueue.getState().getStateName(), "stopped");
  assertTrue(manager.isRunning("first"));
  assertFalse(manager.isRunning("second"));
  assertEquals(firstSubQueue.getSchedulingInfo(), "queueInfo");
  assertEquals(secondSubQueue.getSchedulingInfo(), "queueInfoqueueInfo");
  Set<String> template = new HashSet<String>();
  template.add("first");
  template.add("second");
  assertEquals(manager.getLeafQueueNames(), template);
}

Class: org.apache.hadoop.mapred.TestResourceMgrDelegate

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// NOTE(review): method name has a typo ("tesAllJobs", missing a 't'); left
// unchanged because renaming would alter the reported test identity.
/**
 * Verifies that ResourceMgrDelegate.getAllJobs() maps each mocked YARN
 * application report (YarnApplicationState + FinalApplicationStatus) to the
 * expected JobStatus.State.
 */
@Test public void tesAllJobs() throws Exception {
  final ApplicationClientProtocol applicationsManager=Mockito.mock(ApplicationClientProtocol.class);
  GetApplicationsResponse allApplicationsResponse=Records.newRecord(GetApplicationsResponse.class);
  // Four reports: FINISHED with FAILED/SUCCEEDED/KILLED final status, then FAILED.
  List applications=new ArrayList();
  applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.FAILED));
  applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.SUCCEEDED));
  applications.add(getApplicationReport(YarnApplicationState.FINISHED,FinalApplicationStatus.KILLED));
  applications.add(getApplicationReport(YarnApplicationState.FAILED,FinalApplicationStatus.FAILED));
  allApplicationsResponse.setApplicationList(applications);
  // Any getApplications() call on the mocked RM protocol returns the list above.
  Mockito.when(applicationsManager.getApplications(Mockito.any(GetApplicationsRequest.class))).thenReturn(allApplicationsResponse);
  // Inject the mocked RM client into the delegate at service start.
  ResourceMgrDelegate resourceMgrDelegate=new ResourceMgrDelegate(new YarnConfiguration()){
    @Override protected void serviceStart() throws Exception {
      Assert.assertTrue(this.client instanceof YarnClientImpl);
      ((YarnClientImpl)this.client).setRMClient(applicationsManager);
    }
  };
  JobStatus[] allJobs=resourceMgrDelegate.getAllJobs();
  // Order matches the list built above.
  Assert.assertEquals(State.FAILED,allJobs[0].getState());
  Assert.assertEquals(State.SUCCEEDED,allJobs[1].getState());
  Assert.assertEquals(State.KILLED,allJobs[2].getState());
  Assert.assertEquals(State.FAILED,allJobs[3].getState());
}

Class: org.apache.hadoop.mapred.TestShuffleHandler

UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Validate the limit on number of shuffle connections: with
 * MAX_SHUFFLE_CONNECTIONS=3, the first two fetches succeed and the third is
 * refused (its socket is closed by the server).
 * @throws Exception exception
 */
@Test(timeout=10000) public void testMaxConnections() throws Exception {
  Configuration conf=new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
  // Shuffle stub: skips verification/headers and streams a large dummy payload
  // so connections stay open long enough to hit the limit.
  ShuffleHandler shuffleHandler=new ShuffleHandler(){
    @Override protected Shuffle getShuffle( Configuration conf){
      return new Shuffle(conf){
        @Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
          return null;
        }
        @Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
        }
        @Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
        }
        @Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
          ShuffleHeader header=new ShuffleHeader("dummy_header",5678,5678,1);
          DataOutputBuffer dob=new DataOutputBuffer();
          header.write(dob);
          ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
          // Write the header 100000 times to produce a sizeable body.
          dob=new DataOutputBuffer();
          for (int i=0; i < 100000; ++i) {
            header.write(dob);
          }
          return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
        }
      };
    }
  };
  shuffleHandler.init(conf);
  shuffleHandler.start();
  // Prepare exactly MAX_SHUFFLE_CONNECTIONS connections before opening any.
  int connAttempts=3;
  HttpURLConnection conns[]=new HttpURLConnection[connAttempts];
  for (int i=0; i < connAttempts; i++) {
    String URLstring="http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_"+ i+ "_0";
    URL url=new URL(URLstring);
    conns[i]=(HttpURLConnection)url.openConnection();
    conns[i].setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
    conns[i].setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  }
  for (int i=0; i < connAttempts; i++) {
    conns[i].connect();
  }
  // First two connections are served normally.
  conns[0].getInputStream();
  int rc=conns[0].getResponseCode();
  Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
  conns[1].getInputStream();
  rc=conns[1].getResponseCode();
  Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
  // The third must be dropped by the handler — expect a SocketException.
  try {
    conns[2].getInputStream();
    rc=conns[2].getResponseCode();
    Assert.fail("Expected a SocketException");
  } catch ( SocketException se) {
    LOG.info("Expected - connection should not be open");
  } catch ( Exception e) {
    Assert.fail("Expected a SocketException");
  }
  shuffleHandler.stop();
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies shuffle-token recovery across ShuffleHandler restarts: a token
 * registered before shutdown still authorizes fetches after a restart with
 * the same recovery path, and stops working once the application is stopped.
 */
@Test public void testRecovery() throws IOException {
  final String user="someuser";
  final ApplicationId appId=ApplicationId.newInstance(12345,1);
  final JobID jobId=JobID.downgrade(TypeConverter.fromYarn(appId));
  final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
  Configuration conf=new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
  ShuffleHandler shuffle=new ShuffleHandler();
  // Recovery state (tokens) is persisted under tmpDir.
  shuffle.setRecoveryPath(new Path(tmpDir.toString()));
  tmpDir.mkdirs();
  try {
    shuffle.init(conf);
    shuffle.start();
    // Serialize a job token and register it for the application.
    DataOutputBuffer outputBuffer=new DataOutputBuffer();
    outputBuffer.reset();
    Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
    jt.write(outputBuffer);
    shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
    // Token works on the live handler.
    int rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
    // Restart with the same recovery path: token must be recovered.
    shuffle.close();
    shuffle=new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
    // Stopping the application invalidates the token.
    shuffle.stopApplication(new ApplicationTerminationContext(appId));
    rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,rc);
    // After another restart the stopped application must stay unauthorized.
    shuffle.close();
    shuffle=new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,rc);
  } finally {
    if (shuffle != null) {
      shuffle.close();
    }
    FileUtil.fullyDelete(tmpDir);
  }
}

EqualityVerifier 
/**
 * Test the validation of ShuffleHandler's meta-data's serialization and
 * de-serialization: a positive value, a negative value and a typical port
 * must each survive a round trip unchanged.
 * @throws Exception exception
 */
@Test(timeout=10000) public void testSerializeMeta() throws Exception {
  int[] samples = {1, -1, 8080};
  for (int value : samples) {
    assertEquals(value,
        ShuffleHandler.deserializeMetaData(ShuffleHandler.serializeMetaData(value)));
  }
}

APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies version handling of the recovery state DB: recovery works across
 * restarts at the current version (1.0) and a compatible minor bump (1.1),
 * but a major-version change (2.1) makes startup fail with
 * ServiceStateException.
 */
@Test public void testRecoveryFromOtherVersions() throws IOException {
  final String user="someuser";
  final ApplicationId appId=ApplicationId.newInstance(12345,1);
  final File tmpDir=new File(System.getProperty("test.build.data",System.getProperty("java.io.tmpdir")),TestShuffleHandler.class.getName());
  Configuration conf=new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS,3);
  ShuffleHandler shuffle=new ShuffleHandler();
  shuffle.setRecoveryPath(new Path(tmpDir.toString()));
  tmpDir.mkdirs();
  try {
    shuffle.init(conf);
    shuffle.start();
    // Register a job token so later fetches can be authorized.
    DataOutputBuffer outputBuffer=new DataOutputBuffer();
    outputBuffer.reset();
    Token jt=new Token("identifier".getBytes(),"password".getBytes(),new Text(user),new Text("shuffleService"));
    jt.write(outputBuffer);
    shuffle.initializeApplication(new ApplicationInitializationContext(user,appId,ByteBuffer.wrap(outputBuffer.getData(),0,outputBuffer.getLength())));
    int rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
    // Restart at the same version — token must be recovered.
    shuffle.close();
    shuffle=new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
    Version version=Version.newInstance(1,0);
    Assert.assertEquals(version,shuffle.getCurrentVersion());
    // Store a compatible (same-major) 1.1 version and restart: still works,
    // and the stored version is rewritten to the current one.
    Version version11=Version.newInstance(1,1);
    shuffle.storeVersion(version11);
    Assert.assertEquals(version11,shuffle.loadVersion());
    shuffle.close();
    shuffle=new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    Assert.assertEquals(version,shuffle.loadVersion());
    rc=getShuffleResponseCode(shuffle,jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK,rc);
    // Store an incompatible (different-major) 2.1 version: restart must fail.
    Version version21=Version.newInstance(2,1);
    shuffle.storeVersion(version21);
    Assert.assertEquals(version21,shuffle.loadVersion());
    shuffle.close();
    shuffle=new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    try {
      shuffle.start();
      Assert.fail("Incompatible version, should expect fail here.");
    } catch ( ServiceStateException e) {
      Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for state DB schema:"));
    }
  } finally {
    if (shuffle != null) {
      shuffle.close();
    }
    FileUtil.fullyDelete(tmpDir);
  }
}

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/**
 * Simulate a reducer that sends an invalid shuffle-header — sometimes a wrong
 * header_name and sometimes a wrong version — and expect the handler to
 * reject every such request with HTTP 400 (bad request).
 * @throws Exception exception
 */
@Test(timeout=10000) public void testIncompatibleShuffleVersion() throws Exception {
  final int failureNum = 3;
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  ShuffleHandler shuffleHandler = new ShuffleHandler();
  shuffleHandler.init(conf);
  shuffleHandler.start();
  URL url = new URL("http://127.0.0.1:"
      + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
      + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
  for (int attempt = 0; attempt < failureNum; ++attempt) {
    // Attempt 0: good name, bad version; attempt 1: bad name, good version;
    // attempt 2: both bad. Every combination must be rejected.
    String headerName = (attempt == 0) ? "mapreduce" : "other";
    String headerVersion = (attempt == 1) ? "1.0.0" : "1.0.1";
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, headerName);
    conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION, headerVersion);
    conn.connect();
    Assert.assertEquals(HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
  }
  shuffleHandler.stop();
  shuffleHandler.close();
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verify client prematurely closing a connection: the server must notice the
 * closed channel and must NOT report an error via sendError (the failures
 * list stays empty).
 * @throws Exception exception.
 */
@Test(timeout=10000) public void testClientClosesConnection() throws Exception {
  // Collects an Error if the server ever calls sendError after the close.
  final ArrayList failures=new ArrayList(1);
  Configuration conf=new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
  ShuffleHandler shuffleHandler=new ShuffleHandler(){
    @Override protected Shuffle getShuffle( Configuration conf){
      return new Shuffle(conf){
        @Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
          return null;
        }
        @Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
          // Advertise a fixed content length of 100 bytes.
          super.setResponseHeaders(response,keepAliveParam,100);
        }
        @Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
        }
        @Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
          ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
          DataOutputBuffer dob=new DataOutputBuffer();
          header.write(dob);
          ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
          // Large follow-up payload so the client closes mid-transfer.
          dob=new DataOutputBuffer();
          for (int i=0; i < 100000; ++i) {
            header.write(dob);
          }
          return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
        }
        // Both sendError overloads record a failure and close the channel;
        // neither should fire when the client closed the connection itself.
        @Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
        @Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
      };
    }
  };
  shuffleHandler.init(conf);
  shuffleHandler.start();
  URL url=new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  conn.connect();
  DataInputStream input=new DataInputStream(conn.getInputStream());
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  Assert.assertEquals("close",conn.getHeaderField(HttpHeaders.CONNECTION));
  // Read only the first header, then close early to simulate the premature
  // client-side disconnect.
  ShuffleHeader header=new ShuffleHeader();
  header.readFields(input);
  input.close();
  shuffleHandler.stop();
  Assert.assertTrue("sendError called when client closed connection",failures.size() == 0);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Verifies keep-alive behavior: both a plain fetch (keep-alive enabled via
 * config) and an explicit "keepAlive=true" fetch must return HTTP 200 with
 * "Connection: keep-alive" and "Keep-Alive: timeout=1" headers.
 */
@Test(timeout=10000) public void testKeepAlive() throws Exception {
  final ArrayList failures=new ArrayList(1);
  Configuration conf=new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY,0);
  conf.setBoolean(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED,true);
  // Negative timeout — presumably clamped to the minimum (headers below
  // expect "timeout=1"); confirm against ShuffleHandler.
  conf.setInt(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT,-100);
  ShuffleHandler shuffleHandler=new ShuffleHandler(){
    @Override protected Shuffle getShuffle( final Configuration conf){
      return new Shuffle(conf){
        @Override protected MapOutputInfo getMapOutputInfo( String base, String mapId, int reduce, String user) throws IOException {
          return null;
        }
        @Override protected void verifyRequest( String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
        }
        @Override protected void populateHeaders( List mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map infoMap) throws IOException {
          // Compute the content length the same way sendMapOutput writes it.
          ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
          DataOutputBuffer dob=new DataOutputBuffer();
          header.write(dob);
          dob=new DataOutputBuffer();
          for (int i=0; i < 100000; ++i) {
            header.write(dob);
          }
          long contentLength=dob.getLength();
          // When the client asked for keep-alive explicitly, disable the
          // config-driven flag so the param path is the one exercised.
          if (keepAliveParam) {
            connectionKeepAliveEnabled=false;
          }
          super.setResponseHeaders(response,keepAliveParam,contentLength);
        }
        @Override protected ChannelFuture sendMapOutput( ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
          // NOTE(review): this local response object is created but never
          // used; left as-is (doc-only change).
          HttpResponse response=new DefaultHttpResponse(HTTP_1_1,OK);
          ShuffleHeader header=new ShuffleHeader("attempt_12345_1_m_1_0",5678,5678,1);
          DataOutputBuffer dob=new DataOutputBuffer();
          header.write(dob);
          ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
          dob=new DataOutputBuffer();
          for (int i=0; i < 100000; ++i) {
            header.write(dob);
          }
          return ch.write(wrappedBuffer(dob.getData(),0,dob.getLength()));
        }
        // sendError must not be reached; record a failure if it is.
        @Override protected void sendError( ChannelHandlerContext ctx, HttpResponseStatus status){
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
        @Override protected void sendError( ChannelHandlerContext ctx, String message, HttpResponseStatus status){
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
      };
    }
  };
  shuffleHandler.init(conf);
  shuffleHandler.start();
  String shuffleBaseURL="http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY);
  // First fetch: keep-alive comes from configuration.
  URL url=new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&" + "map=attempt_12345_1_m_1_0");
  HttpURLConnection conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  conn.connect();
  DataInputStream input=new DataInputStream(conn.getInputStream());
  Assert.assertEquals(HttpHeaders.KEEP_ALIVE,conn.getHeaderField(HttpHeaders.CONNECTION));
  Assert.assertEquals("timeout=1",conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  ShuffleHeader header=new ShuffleHeader();
  header.readFields(input);
  input.close();
  // Second fetch: keep-alive requested via the URL parameter.
  url=new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&" + "map=attempt_12345_1_m_1_0&keepAlive=true");
  conn=(HttpURLConnection)url.openConnection();
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  conn.connect();
  input=new DataInputStream(conn.getInputStream());
  Assert.assertEquals(HttpHeaders.KEEP_ALIVE,conn.getHeaderField(HttpHeaders.CONNECTION));
  Assert.assertEquals("timeout=1",conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
  Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode());
  header=new ShuffleHeader();
  header.readFields(input);
  input.close();
}

Class: org.apache.hadoop.mapred.TestSkipBadRecords

BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Checks SkipBadRecords defaults, then sets every skip-mode property and
 * verifies each getter reflects the update.
 */
@Test(timeout = 5000)
public void testSkipBadRecords() {
  Configuration conf = new Configuration();
  // Default values.
  assertEquals(2, SkipBadRecords.getAttemptsToStartSkipping(conf));
  assertTrue(SkipBadRecords.getAutoIncrMapperProcCount(conf));
  assertTrue(SkipBadRecords.getAutoIncrReducerProcCount(conf));
  assertEquals(0L, SkipBadRecords.getMapperMaxSkipRecords(conf));
  // Use the (long, long) overload directly; the original passed a spurious
  // trailing 0 "delta", silently selecting the floating-point assertEquals.
  assertEquals(0L, SkipBadRecords.getReducerMaxSkipGroups(conf));
  assertNull(SkipBadRecords.getSkipOutputPath(conf));

  // Mutate every property; the skip output path goes on a JobConf because
  // setSkipOutputPath takes a JobConf, not a plain Configuration.
  SkipBadRecords.setAttemptsToStartSkipping(conf, 5);
  SkipBadRecords.setAutoIncrMapperProcCount(conf, false);
  SkipBadRecords.setAutoIncrReducerProcCount(conf, false);
  SkipBadRecords.setMapperMaxSkipRecords(conf, 6L);
  SkipBadRecords.setReducerMaxSkipGroups(conf, 7L);
  JobConf jc = new JobConf();
  SkipBadRecords.setSkipOutputPath(jc, new Path("test"));

  // Updated values.
  assertEquals(5, SkipBadRecords.getAttemptsToStartSkipping(conf));
  assertFalse(SkipBadRecords.getAutoIncrMapperProcCount(conf));
  assertFalse(SkipBadRecords.getAutoIncrReducerProcCount(conf));
  assertEquals(6L, SkipBadRecords.getMapperMaxSkipRecords(conf));
  assertEquals(7L, SkipBadRecords.getReducerMaxSkipGroups(conf));
  assertEquals("test", SkipBadRecords.getSkipOutputPath(jc).toString());
}

Class: org.apache.hadoop.mapred.TestTaskAttemptListenerImpl

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies that a TaskCheckpointID stored on the listener is returned as the
 * very same instance by getCheckpointID, with its checkpoint byte/time
 * counters and partial committed output intact.
 */
@Test
public void testCheckpointIDTracking() throws IOException, InterruptedException {
  SystemClock clock = new SystemClock();

  // Mock the AM context: a job whose task always allows commit, plus an
  // event handler and heartbeat machinery the listener needs at init time.
  org.apache.hadoop.mapreduce.v2.app.job.Task mockTask =
      mock(org.apache.hadoop.mapreduce.v2.app.job.Task.class);
  when(mockTask.canCommit(any(TaskAttemptId.class))).thenReturn(true);
  Job mockJob = mock(Job.class);
  when(mockJob.getTask(any(TaskId.class))).thenReturn(mockTask);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  AppContext appCtx = mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  when(appCtx.getClock()).thenReturn(clock);
  when(appCtx.getEventHandler()).thenReturn(ea);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  final TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  // Inject the mocked heartbeat handler instead of the real one.
  TaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, policy) {
        @Override
        protected void registerHeartbeatHandler(Configuration conf) {
          taskHeartbeatHandler = hbHandler;
        }
      };
  Configuration conf = new Configuration();
  conf.setBoolean(MRJobConfig.TASK_PREEMPTION, true);
  listener.init(conf);
  listener.start();

  TaskAttemptID tid = new TaskAttemptID("12345", 1, TaskType.REDUCE, 1, 0);

  List<Path> partialOut = new ArrayList<Path>();
  partialOut.add(new Path("/prev1"));
  partialOut.add(new Path("/prev2"));

  // Counters backing the checkpoint's byte and time values.
  Counters counters = mock(Counters.class);
  final long CBYTES = 64L * 1024 * 1024;
  final long CTIME = 4344L;
  final Path CLOC = new Path("/test/1");
  Counter cbytes = mock(Counter.class);
  when(cbytes.getValue()).thenReturn(CBYTES);
  Counter ctime = mock(Counter.class);
  when(ctime.getValue()).thenReturn(CTIME);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_BYTES))).thenReturn(cbytes);
  when(counters.findCounter(eq(EnumCounter.CHECKPOINT_MS))).thenReturn(ctime);

  TaskCheckpointID incid =
      new TaskCheckpointID(new FSCheckpointID(CLOC), partialOut, counters);
  listener.setCheckpointID(
      org.apache.hadoop.mapred.TaskID.downgrade(tid.getTaskID()), incid);

  CheckpointID outcid = listener.getCheckpointID(tid.getTaskID());
  TaskCheckpointID tcid = (TaskCheckpointID) outcid;
  assertEquals(CBYTES, tcid.getCheckpointBytes());
  assertEquals(CTIME, tcid.getCheckpointTime());
  assertTrue(partialOut.containsAll(tcid.getPartialCommittedOutput()));
  assertTrue(tcid.getPartialCommittedOutput().containsAll(partialOut));
  // The original used a bare Java `assert`, which is a no-op unless the JVM
  // runs with -ea; use a real JUnit assertion instead.
  assertTrue("checkpoint ID should be returned by reference", outcid == incid);

  listener.stop();
}

UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises the getTask() lifecycle of TaskAttemptListenerImpl: an unknown
 * JVM must be told to die, a pending task yields null, a launched task is
 * handed out exactly once, and an unregistered JVM is told to die again.
 * Also checks JVMId.forName parsing, including its failure message.
 */
@Test(timeout = 5000)
public void testGetTask() throws IOException {
  // Mocked AM context and security/heartbeat collaborators.
  AppContext appCtx = mock(AppContext.class);
  JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler = mock(RMHeartbeatHandler.class);
  TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher = mock(Dispatcher.class);
  EventHandler ea = mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  MockTaskAttemptListenerImpl listener =
      new MockTaskAttemptListenerImpl(appCtx, secret, rmHeartbeatHandler, hbHandler, policy);
  Configuration conf = new Configuration();
  listener.init(conf);
  listener.start();

  JVMId id = new JVMId("foo", 1, true, 1);
  WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
  JvmContext context = new JvmContext();
  context.jvmId = id;

  // Unknown JVM: the listener replies with a shouldDie task.
  JvmTask result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);

  TaskAttemptId attemptID = mock(TaskAttemptId.class);
  Task task = mock(Task.class);
  // Pending (registered but not launched): no task is handed out yet.
  listener.registerPendingTask(task, wid);
  result = listener.getTask(context);
  assertNull(result);
  listener.unregister(attemptID, wid);

  // Launched: the task is handed out once and the heartbeat handler is
  // notified of the attempt.
  listener.registerPendingTask(task, wid);
  listener.registerLaunchedTask(attemptID, wid);
  verify(hbHandler).register(attemptID);
  result = listener.getTask(context);
  assertNotNull(result);
  assertFalse(result.shouldDie);
  // A second ask from the same JVM is told to die.
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);

  // After unregistering, the JVM is told to die as well.
  listener.unregister(attemptID, wid);
  result = listener.getTask(context);
  assertNotNull(result);
  assertTrue(result.shouldDie);
  listener.stop();

  // JVMId string parsing: valid and malformed forms.
  JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
  assertNotNull(jvmid);
  try {
    JVMId.forName("jvm_001_002_m_004_006");
    fail();
  } catch (IllegalArgumentException e) {
    // Expected value first (the original reversed the assertEquals arguments).
    assertEquals("TaskId string : jvm_001_002_m_004_006 is not properly formed",
        e.getMessage());
  }
}

InternalCallVerifier EqualityVerifier 
/**
 * Verifies that getMapCompletionEvents() returns only map-attempt completion
 * events (2 of the 4 task events are maps) for each requested window.
 */
@Test(timeout=10000) public void testGetMapCompletionEvents() throws IOException {
  TaskAttemptCompletionEvent[] empty={};
  // Four task events; indices 0 and 2 are map attempts.
  TaskAttemptCompletionEvent[] taskEvents={createTce(0,true,TaskAttemptCompletionEventStatus.OBSOLETE),createTce(1,false,TaskAttemptCompletionEventStatus.FAILED),createTce(2,true,TaskAttemptCompletionEventStatus.SUCCEEDED),createTce(3,false,TaskAttemptCompletionEventStatus.FAILED)};
  TaskAttemptCompletionEvent[] mapEvents={taskEvents[0],taskEvents[2]};
  Job mockJob=mock(Job.class);
  // Stub each (fromEvent, maxEvents) window queried below.
  when(mockJob.getTaskAttemptCompletionEvents(0,100)).thenReturn(taskEvents);
  when(mockJob.getTaskAttemptCompletionEvents(0,2)).thenReturn(Arrays.copyOfRange(taskEvents,0,2));
  when(mockJob.getTaskAttemptCompletionEvents(2,100)).thenReturn(Arrays.copyOfRange(taskEvents,2,4));
  when(mockJob.getMapAttemptCompletionEvents(0,100)).thenReturn(TypeConverter.fromYarn(mapEvents));
  when(mockJob.getMapAttemptCompletionEvents(0,2)).thenReturn(TypeConverter.fromYarn(mapEvents));
  when(mockJob.getMapAttemptCompletionEvents(2,100)).thenReturn(TypeConverter.fromYarn(empty));
  AppContext appCtx=mock(AppContext.class);
  when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);
  JobTokenSecretManager secret=mock(JobTokenSecretManager.class);
  RMHeartbeatHandler rmHeartbeatHandler=mock(RMHeartbeatHandler.class);
  final TaskHeartbeatHandler hbHandler=mock(TaskHeartbeatHandler.class);
  Dispatcher dispatcher=mock(Dispatcher.class);
  EventHandler ea=mock(EventHandler.class);
  when(dispatcher.getEventHandler()).thenReturn(ea);
  when(appCtx.getEventHandler()).thenReturn(ea);
  CheckpointAMPreemptionPolicy policy=new CheckpointAMPreemptionPolicy();
  policy.init(appCtx);
  // Inject the mocked heartbeat handler instead of the real one.
  TaskAttemptListenerImpl listener=new MockTaskAttemptListenerImpl(appCtx,secret,rmHeartbeatHandler,policy){
    @Override protected void registerHeartbeatHandler( Configuration conf){
      taskHeartbeatHandler=hbHandler;
    }
  };
  Configuration conf=new Configuration();
  listener.init(conf);
  listener.start();
  JobID jid=new JobID("12345",1);
  TaskAttemptID tid=new TaskAttemptID("12345",1,TaskType.REDUCE,1,0);
  // Full window: both map events.
  MapTaskCompletionEventsUpdate update=listener.getMapCompletionEvents(jid,0,100,tid);
  assertEquals(2,update.events.length);
  // Window covering only the first two task events still yields both maps.
  update=listener.getMapCompletionEvents(jid,0,2,tid);
  assertEquals(2,update.events.length);
  // Window past the map events yields none.
  update=listener.getMapCompletionEvents(jid,2,100,tid);
  assertEquals(0,update.events.length);
}

EqualityVerifier 
/**
 * Round-trips a JVMId through its string form: an id built via the
 * constructor must compare equal to the same id parsed by forName().
 */
@Test(timeout=5000) public void testJVMId(){
  JVMId constructed = new JVMId("test", 1, true, 2);
  JVMId parsed = JVMId.forName("jvm_test_0001_m_000002");
  assertEquals(0, constructed.compareTo(parsed));
}

Class: org.apache.hadoop.mapred.TestTaskLog

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Without YARN_APP_CONTAINER_LOG_DIR set, getMRv2LogDir() must return null
 * and the task log file falls back to a path ending in "stdout".
 * @throws IOException on log-file access error
 */
@Test(timeout = 50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
  // Make sure the MRv2 container log dir property is absent for this test.
  System.clearProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR);
  // Expected value first (the original passed the arguments reversed).
  assertEquals(null, TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("stdout"));
}

APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * With YARN_APP_CONTAINER_LOG_DIR set, verifies the MRv2 log dir, task log
 * file path, index file location after syncLogs, the real DEBUGOUT location,
 * the log dir owner, and that the copied log can be read back.
 * @throws IOException on log-file access error
 */
@Test(timeout = 50000)
public void testTaskLog() throws IOException {
  System.setProperty(YarnConfiguration.YARN_APP_CONTAINER_LOG_DIR, "testString");
  // Expected value first (the original passed the arguments reversed).
  assertEquals("testString", TaskLog.getMRv2LogDir());
  TaskAttemptID taid = mock(TaskAttemptID.class);
  JobID jid = new JobID("job", 1);
  when(taid.getJobID()).thenReturn(jid);
  when(taid.toString()).thenReturn("JobId");
  File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
  assertTrue(f.getAbsolutePath().endsWith("testString" + File.separatorChar + "stdout"));

  // Create an empty index file, then let syncLogs rewrite it.
  File indexFile = TaskLog.getIndexFile(taid, true);
  if (!indexFile.getParentFile().exists()) {
    indexFile.getParentFile().mkdirs();
  }
  indexFile.delete();
  indexFile.createNewFile();
  TaskLog.syncLogs("location", taid, true);
  assertTrue(indexFile.getAbsolutePath().endsWith(
      "userlogs" + File.separatorChar + "job_job_0001" + File.separatorChar
          + "JobId.cleanup" + File.separatorChar + "log.index"));

  // DEBUGOUT resolves under the synced "location"; copy the index there so
  // readTaskLog has content to read.
  f = TaskLog.getRealTaskLogFileLocation(taid, true, LogName.DEBUGOUT);
  if (f != null) {
    assertTrue(f.getAbsolutePath().endsWith("location" + File.separatorChar + "debugout"));
    FileUtils.copyFile(indexFile, f);
  }
  assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
  assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT, taid, true).length() > 0);
}

Class: org.apache.hadoop.mapred.TestTaskLogAppender

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Exercises TaskLogAppender: configures it from system properties, checks
 * the parsed task id / log size / cleanup flag, writes one logging event
 * through it, and re-activates a fresh appender with the cleanup flag set.
 */
@SuppressWarnings("deprecation")
@Test(timeout = 5000)
public void testTaskLogAppender() {
  TaskLogAppender appender = new TaskLogAppender();

  System.setProperty(TaskLogAppender.TASKID_PROPERTY, "attempt_01_02_m03_04_001");
  System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
  appender.activateOptions();
  // Expected value first (the original reversed the assertEquals arguments).
  assertEquals("attempt_01_02_m03_04_001", appender.getTaskId());
  // NOTE(review): the property is set to 1003 but 1000 is expected —
  // presumably the appender normalizes the configured size; confirm against
  // TaskLogAppender before tightening this.
  assertEquals(1000, appender.getTotalLogFileSize());
  assertEquals(false, appender.getIsCleanup());

  // Route output to an in-memory writer and log a single event through it.
  Writer writer = new StringWriter();
  appender.setWriter(writer);
  Layout layout = new PatternLayout("%-5p [%t]: %m%n");
  appender.setLayout(layout);
  Category logger = Logger.getLogger(getClass().getName());
  LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO,
      "message", new Throwable());
  appender.append(event);
  appender.flush();
  appender.close();
  assertTrue(writer.toString().length() > 0);

  // A fresh appender with the cleanup flag set must report it after activation.
  appender = new TaskLogAppender();
  appender.setIsCleanup(true);
  appender.activateOptions();
  assertEquals(true, appender.getIsCleanup());
}

Class: org.apache.hadoop.mapred.TestTaskPerformanceSplits

InternalCallVerifier EqualityVerifier 
/**
 * Feeds (progress, value) samples into an 8-segment CumulativePeriodicStats
 * and an 8-segment StatePeriodicStats accumulator and checks the
 * per-segment interpolated values each produces.
 */
@Test public void testPeriodStatsets(){
  PeriodicStatsAccumulator cumulative=new CumulativePeriodicStats(8);
  PeriodicStatsAccumulator status=new StatePeriodicStats(8);
  // Cumulative samples: progress fractions with running totals.
  cumulative.extend(0.0D,0);
  cumulative.extend(0.4375D,700);
  cumulative.extend(0.5625D,1100);
  cumulative.extend(0.625D,1300);
  cumulative.extend(1.0D,7901);
  // NOTE(review): `total` is never used; left as-is (doc-only change).
  int total=0;
  int[] results=cumulative.getValues();
  for (int i=0; i < 8; ++i) {
    System.err.println("segment i = " + results[i]);
  }
  assertEquals("Bad interpolation in cumulative segment 0",200,results[0]);
  assertEquals("Bad interpolation in cumulative segment 1",200,results[1]);
  assertEquals("Bad interpolation in cumulative segment 2",200,results[2]);
  assertEquals("Bad interpolation in cumulative segment 3",300,results[3]);
  assertEquals("Bad interpolation in cumulative segment 4",400,results[4]);
  assertEquals("Bad interpolation in cumulative segment 5",2200,results[5]);
  assertEquals("Bad interpolation in cumulative segment 6",2200,results[6]);
  assertEquals("Bad interpolation in cumulative segment 7",2201,results[7]);
  // State samples: progress fractions with instantaneous values.
  status.extend(0.0D,0);
  status.extend(1.0D / 16.0D,300);
  status.extend(3.0D / 16.0D,700);
  status.extend(7.0D / 16.0D,2300);
  status.extend(1.0D,1400);
  ;
  results=status.getValues();
  assertEquals("Bad interpolation in status segment 0",275,results[0]);
  assertEquals("Bad interpolation in status segment 1",750,results[1]);
  assertEquals("Bad interpolation in status segment 2",1500,results[2]);
  assertEquals("Bad interpolation in status segment 3",2175,results[3]);
  assertEquals("Bad interpolation in status segment 4",2100,results[4]);
  assertEquals("Bad interpolation in status segment 5",1900,results[5]);
  assertEquals("Bad interpolation in status segment 6",1700,results[6]);
  assertEquals("Bad interpolation in status segment 7",1500,results[7]);
}

Class: org.apache.hadoop.mapred.TestTaskStatus

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test the {@link TaskStatus} against large sized task-diagnostic-info and * state-string. Does the following * - create Map/Reduce TaskStatus such that the task-diagnostic-info and * state-string are small strings and check their contents * - append them with small string and check their contents * - append them with large string and check their size * - update the status using statusUpdate() calls and check the size/contents * - create Map/Reduce TaskStatus with large string and check their size */
@Test public void testTaskDiagnosticsAndStateString(){
  String test="hi";
  final int maxSize=16;
  // TaskStatus stub that caps string fields at maxSize characters.
  TaskStatus status=new TaskStatus(null,0,0,null,test,test,null,null,null){
    @Override protected int getMaxStringSize(){
      return maxSize;
    }
    @Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
    }
    @Override public boolean getIsMap(){
      return false;
    }
  };
  assertEquals("Small diagnostic info test failed",status.getDiagnosticInfo(),test);
  assertEquals("Small state string test failed",status.getStateString(),test);
  // Appending a small string: diagnostic info accumulates, state string is
  // replaced.
  String newDInfo=test.concat(test);
  status.setDiagnosticInfo(test);
  status.setStateString(newDInfo);
  assertEquals("Small diagnostic info append failed",newDInfo,status.getDiagnosticInfo());
  assertEquals("Small state-string append failed",newDInfo,status.getStateString());
  // statusUpdate(TaskStatus) merges diagnostic info and takes the new state.
  TaskStatus newStatus=(TaskStatus)status.clone();
  String newSInfo="hi1";
  newStatus.setStateString(newSInfo);
  status.statusUpdate(newStatus);
  newDInfo=newDInfo.concat(newStatus.getDiagnosticInfo());
  assertEquals("Status-update on diagnostic-info failed",newDInfo,status.getDiagnosticInfo());
  assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
  // The other statusUpdate overloads must also set the state string.
  newSInfo="hi2";
  status.statusUpdate(0,newSInfo,null);
  assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
  newSInfo="hi3";
  status.statusUpdate(null,0,newSInfo,null,0);
  assertEquals("Status-update on state-string failed",newSInfo,status.getStateString());
  // Large strings (20 chars) must be truncated to maxSize on every path.
  String large="hihihihihihihihihihi";
  status.setDiagnosticInfo(large);
  status.setStateString(large);
  assertEquals("Large diagnostic info append test failed",maxSize,status.getDiagnosticInfo().length());
  assertEquals("Large state-string append test failed",maxSize,status.getStateString().length());
  newStatus.setDiagnosticInfo(large + "0");
  newStatus.setStateString(large + "1");
  status.statusUpdate(newStatus);
  assertEquals("Status-update on diagnostic info failed",maxSize,status.getDiagnosticInfo().length());
  assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
  status.statusUpdate(0,large + "2",null);
  assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
  status.statusUpdate(null,0,large + "3",null,0);
  assertEquals("Status-update on state-string failed",maxSize,status.getStateString().length());
  // A TaskStatus constructed directly with large strings is also capped.
  status=new TaskStatus(null,0,0,null,large,large,null,null,null){
    @Override protected int getMaxStringSize(){
      return maxSize;
    }
    @Override public void addFetchFailedMap( TaskAttemptID mapTaskId){
    }
    @Override public boolean getIsMap(){
      return false;
    }
  };
  assertEquals("Large diagnostic info test failed",maxSize,status.getDiagnosticInfo().length());
  assertEquals("Large state-string test failed",maxSize,status.getStateString().length());
}

Class: org.apache.hadoop.mapred.TestTextInputFormat

APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test(timeout=500000) public void testFormat() throws Exception { JobConf job=new JobConf(defaultConf); Path file=new Path(workDir,"test.txt"); Reporter reporter=Reporter.NULL; int seed=new Random().nextInt(); LOG.info("seed = " + seed); Random random=new Random(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) { LOG.debug("creating; entries = " + length); Writer writer=new OutputStreamWriter(localFs.create(file)); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } TextInputFormat format=new TextInputFormat(); format.configure(job); LongWritable key=new LongWritable(); Text value=new Text(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 20) + 1; LOG.debug("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(job,numSplits); LOG.debug("splitting: got = " + splits.length); if (length == 0) { assertEquals("Files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length); assertEquals("Empty file length == 0",0,splits[0].getLength()); } BitSet bits=new BitSet(length); for (int j=0; j < splits.length; j++) { LOG.debug("split[" + j + "]= "+ splits[j]); RecordReader reader=format.getRecordReader(splits[j],job,reporter); try { int count=0; while (reader.next(key,value)) { int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); if (bits.get(v)) { LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos()); } assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ count); } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * Test readLine for correct interpretation of maxLineLength * (returned string should be clipped at maxLineLength, and the * remaining bytes on the same line should be thrown out). * Also check that returned value matches the string length. * Varies buffer size to stress test. * @throws Exception */ @Test(timeout=5000) public void testMaxLineLength() throws Exception { final String STR="a\nbb\n\nccc\rdddd\r\neeeee"; final int STRLENBYTES=STR.getBytes().length; Text out=new Text(); for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) { LineReader in=makeStream(STR,bufsz); int c=0; c+=in.readLine(out,1); assertEquals("line1 length, bufsz: " + bufsz,1,out.getLength()); c+=in.readLine(out,1); assertEquals("line2 length, bufsz: " + bufsz,1,out.getLength()); c+=in.readLine(out,1); assertEquals("line3 length, bufsz: " + bufsz,0,out.getLength()); c+=in.readLine(out,3); assertEquals("line4 length, bufsz: " + bufsz,3,out.getLength()); c+=in.readLine(out,10); assertEquals("line5 length, bufsz: " + bufsz,4,out.getLength()); c+=in.readLine(out,8); assertEquals("line5 length, bufsz: " + bufsz,5,out.getLength()); assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out)); assertEquals("total bytes, bufsz: " + bufsz,c,STRLENBYTES); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test using the gzip codec and an empty input file */ @Test(timeout=5000) public void testGzipEmpty() throws IOException { JobConf job=new JobConf(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,job); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"empty.gz"),gzip,""); FileInputFormat.setInputPaths(job,workDir); TextInputFormat format=new TextInputFormat(); format.configure(job); InputSplit[] splits=format.getSplits(job,100); assertEquals("Compressed files of length 0 are not returned from FileInputFormat.getSplits().",1,splits.length); List results=readSplit(format,splits[0],job); assertEquals("Compressed empty file length == 0",0,results.size()); }

APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Same duplicate/gap verification as testFormat, but through the splittable BZip2 codec
// (loaded reflectively so the test fails with a clear IOException if the codec is absent).
// BitSet cardinality at the end proves every record landed in exactly one split.
@Test(timeout=900000) public void testSplitableCodecs() throws IOException { JobConf conf=new JobConf(defaultConf); int seed=new Random().nextInt(); CompressionCodec codec=null; try { codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf); } catch ( ClassNotFoundException cnfe) { throw new IOException("Illegal codec!"); } Path file=new Path(workDir,"test" + codec.getDefaultExtension()); Reporter reporter=Reporter.NULL; LOG.info("seed = " + seed); Random random=new Random(seed); FileSystem localFs=FileSystem.getLocal(conf); localFs.delete(workDir,true); FileInputFormat.setInputPaths(conf,workDir); final int MAX_LENGTH=500000; for (int length=MAX_LENGTH / 2; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) { LOG.info("creating; entries = " + length); Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file))); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } TextInputFormat format=new TextInputFormat(); format.configure(conf); LongWritable key=new LongWritable(); Text value=new Text(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1; LOG.info("splitting: requesting = " + numSplits); InputSplit[] splits=format.getSplits(conf,numSplits); LOG.info("splitting: got = " + splits.length); BitSet bits=new BitSet(length); for (int j=0; j < splits.length; j++) { LOG.debug("split[" + j + "]= "+ splits[j]); RecordReader reader=format.getRecordReader(splits[j],conf,reporter); try { int counter=0; while (reader.next(key,value)) { int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); if (bits.get(v)) { LOG.warn("conflict with " + v + " in split "+ j+ " at position "+ reader.getPos()); } assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); counter++; } if (counter > 0) { LOG.info("splits[" + j + "]="+ splits[j]+ " count="+ counter); } else { 
LOG.debug("splits[" + j + "]="+ splits[j]+ " count="+ counter); } } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
/** * Test readLine for various kinds of line termination sequneces. * Varies buffer size to stress test. Also check that returned * value matches the string length. * @throws Exception */ @Test(timeout=5000) public void testNewLines() throws Exception { final String STR="a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee"; final int STRLENBYTES=STR.getBytes().length; Text out=new Text(); for (int bufsz=1; bufsz < STRLENBYTES + 1; ++bufsz) { LineReader in=makeStream(STR,bufsz); int c=0; c+=in.readLine(out); assertEquals("line1 length, bufsz:" + bufsz,1,out.getLength()); c+=in.readLine(out); assertEquals("line2 length, bufsz:" + bufsz,2,out.getLength()); c+=in.readLine(out); assertEquals("line3 length, bufsz:" + bufsz,0,out.getLength()); c+=in.readLine(out); assertEquals("line4 length, bufsz:" + bufsz,3,out.getLength()); c+=in.readLine(out); assertEquals("line5 length, bufsz:" + bufsz,4,out.getLength()); c+=in.readLine(out); assertEquals("line6 length, bufsz:" + bufsz,0,out.getLength()); c+=in.readLine(out); assertEquals("line7 length, bufsz:" + bufsz,0,out.getLength()); c+=in.readLine(out); assertEquals("line8 length, bufsz:" + bufsz,0,out.getLength()); c+=in.readLine(out); assertEquals("line9 length, bufsz:" + bufsz,5,out.getLength()); assertEquals("end of file, bufsz: " + bufsz,0,in.readLine(out)); assertEquals("total bytes, bufsz: " + bufsz,c,STRLENBYTES); } }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test using the gzip codec for reading */ @Test(timeout=5000) public void testGzip() throws IOException { JobConf job=new JobConf(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,job); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n"); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n"); FileInputFormat.setInputPaths(job,workDir); TextInputFormat format=new TextInputFormat(); format.configure(job); InputSplit[] splits=format.getSplits(job,100); assertEquals("compressed splits == 2",2,splits.length); FileSplit tmp=(FileSplit)splits[0]; if (tmp.getPath().getName().equals("part2.txt.gz")) { splits[0]=splits[1]; splits[1]=tmp; } List results=readSplit(format,splits[0],job); assertEquals("splits[0] length",6,results.size()); assertEquals("splits[0][5]"," dog",results.get(5).toString()); results=readSplit(format,splits[1],job); assertEquals("splits[1] length",2,results.size()); assertEquals("splits[1][0]","this is a test",results.get(0).toString()); assertEquals("splits[1][1]","of gzip",results.get(1).toString()); }

InternalCallVerifier EqualityVerifier 
@Test(timeout=5000) public void testUTF8() throws Exception { LineReader in=makeStream("abcd\u20acbdcd\u20ac"); Text line=new Text(); in.readLine(line); assertEquals("readLine changed utf8 characters","abcd\u20acbdcd\u20ac",line.toString()); in=makeStream("abc\u200axyz"); in.readLine(line); assertEquals("split on fake newline","abc\u200axyz",line.toString()); }

Class: org.apache.hadoop.mapred.TestTextOutputFormat

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testFormat() throws Exception { JobConf job=new JobConf(); job.set(JobContext.TASK_ATTEMPT_ID,attempt); FileOutputFormat.setOutputPath(job,workDir.getParent().getParent()); FileOutputFormat.setWorkOutputPath(job,workDir); FileSystem fs=workDir.getFileSystem(job); if (!fs.mkdirs(workDir)) { fail("Failed to create output directory"); } String file="test_format.txt"; Reporter reporter=Reporter.NULL; TextOutputFormat theOutputFormat=new TextOutputFormat(); RecordWriter theRecordWriter=theOutputFormat.getRecordWriter(localFs,job,file,reporter); Text key1=new Text("key1"); Text key2=new Text("key2"); Text val1=new Text("val1"); Text val2=new Text("val2"); NullWritable nullWritable=NullWritable.get(); try { theRecordWriter.write(key1,val1); theRecordWriter.write(null,nullWritable); theRecordWriter.write(null,val1); theRecordWriter.write(nullWritable,val2); theRecordWriter.write(key2,nullWritable); theRecordWriter.write(key1,null); theRecordWriter.write(null,null); theRecordWriter.write(key2,val2); } finally { theRecordWriter.close(reporter); } File expectedFile=new File(new Path(workDir,file).toString()); StringBuffer expectedOutput=new StringBuffer(); expectedOutput.append(key1).append('\t').append(val1).append("\n"); expectedOutput.append(val1).append("\n"); expectedOutput.append(val2).append("\n"); expectedOutput.append(key2).append("\n"); expectedOutput.append(key1).append("\n"); expectedOutput.append(key2).append('\t').append(val2).append("\n"); String output=UtilsForTests.slurp(expectedFile); assertEquals(expectedOutput.toString(),output); }

BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
/**
 * Test that TextOutputFormat writes a compressed file (DefaultCodec extension
 * appended to the file name) when FileOutputFormat.COMPRESS is set, and that the
 * decompressed contents match the same null-handling rules as the uncompressed case.
 * @throws IOException
 */
@Test
public void testCompress() throws IOException {
  JobConf job = new JobConf();
  job.set(JobContext.TASK_ATTEMPT_ID, attempt);
  job.set(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS, "true");
  FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
  FileOutputFormat.setWorkOutputPath(job, workDir);
  FileSystem fs = workDir.getFileSystem(job);
  if (!fs.mkdirs(workDir)) {
    fail("Failed to create output directory");
  }
  String file = "test_compress.txt";
  Reporter reporter = Reporter.NULL;
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(localFs, job, file, reporter);
  Text key1 = new Text("key1");
  Text key2 = new Text("key2");
  Text val1 = new Text("val1");
  Text val2 = new Text("val2");
  NullWritable nullWritable = NullWritable.get();
  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);
  } finally {
    theRecordWriter.close(reporter);
  }
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append("\t").append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append("\t").append(val2).append("\n");
  DefaultCodec codec = new DefaultCodec();
  codec.setConf(job);
  // The codec appends its default extension to the requested file name.
  Path expectedFile = new Path(workDir, file + codec.getDefaultExtension());
  final FileInputStream istream = new FileInputStream(expectedFile.toString());
  // FIX: streams/reader were leaked if readLine threw; close in finally.
  // FIX: String += in a loop replaced with StringBuilder.
  StringBuilder actual = new StringBuilder();
  CompressionInputStream cistream = codec.createInputStream(istream);
  LineReader reader = new LineReader(cistream);
  try {
    Text out = new Text();
    while (reader.readLine(out) > 0) {
      actual.append(out);
      actual.append("\n");
    }
  } finally {
    reader.close(); // also closes cistream/istream underneath
  }
  assertEquals(expectedOutput.toString(), actual.toString());
}

APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
@Test public void testFormatWithCustomSeparator() throws Exception { JobConf job=new JobConf(); String separator="\u0001"; job.set("mapreduce.output.textoutputformat.separator",separator); job.set(JobContext.TASK_ATTEMPT_ID,attempt); FileOutputFormat.setOutputPath(job,workDir.getParent().getParent()); FileOutputFormat.setWorkOutputPath(job,workDir); FileSystem fs=workDir.getFileSystem(job); if (!fs.mkdirs(workDir)) { fail("Failed to create output directory"); } String file="test_custom.txt"; Reporter reporter=Reporter.NULL; TextOutputFormat theOutputFormat=new TextOutputFormat(); RecordWriter theRecordWriter=theOutputFormat.getRecordWriter(localFs,job,file,reporter); Text key1=new Text("key1"); Text key2=new Text("key2"); Text val1=new Text("val1"); Text val2=new Text("val2"); NullWritable nullWritable=NullWritable.get(); try { theRecordWriter.write(key1,val1); theRecordWriter.write(null,nullWritable); theRecordWriter.write(null,val1); theRecordWriter.write(nullWritable,val2); theRecordWriter.write(key2,nullWritable); theRecordWriter.write(key1,null); theRecordWriter.write(null,null); theRecordWriter.write(key2,val2); } finally { theRecordWriter.close(reporter); } File expectedFile=new File(new Path(workDir,file).toString()); StringBuffer expectedOutput=new StringBuffer(); expectedOutput.append(key1).append(separator).append(val1).append("\n"); expectedOutput.append(val1).append("\n"); expectedOutput.append(val2).append("\n"); expectedOutput.append(key2).append("\n"); expectedOutput.append(key1).append("\n"); expectedOutput.append(key2).append(separator).append(val2).append("\n"); String output=UtilsForTests.slurp(expectedFile); assertEquals(expectedOutput.toString(),output); }

Class: org.apache.hadoop.mapred.TestYARNRunner

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
@Test(timeout=20000) public void testHistoryServerToken() throws Exception { conf.set(YarnConfiguration.RM_PRINCIPAL,"foo@LOCAL"); final String masterPrincipal=Master.getMasterPrincipal(conf); final MRClientProtocol hsProxy=mock(MRClientProtocol.class); when(hsProxy.getDelegationToken(any(GetDelegationTokenRequest.class))).thenAnswer(new Answer(){ public GetDelegationTokenResponse answer( InvocationOnMock invocation){ GetDelegationTokenRequest request=(GetDelegationTokenRequest)invocation.getArguments()[0]; assertEquals(masterPrincipal,request.getRenewer()); org.apache.hadoop.yarn.api.records.Token token=recordFactory.newRecordInstance(org.apache.hadoop.yarn.api.records.Token.class); token.setKind(""); token.setService(""); token.setIdentifier(ByteBuffer.allocate(0)); token.setPassword(ByteBuffer.allocate(0)); GetDelegationTokenResponse tokenResponse=recordFactory.newRecordInstance(GetDelegationTokenResponse.class); tokenResponse.setDelegationToken(token); return tokenResponse; } } ); UserGroupInformation.createRemoteUser("someone").doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { yarnRunner=new YARNRunner(conf,null,null); yarnRunner.getDelegationTokenFromHS(hsProxy); verify(hsProxy).getDelegationToken(any(GetDelegationTokenRequest.class)); return null; } } ); }

APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
@Test public void testAMStandardEnv() throws Exception { final String ADMIN_LIB_PATH="foo"; final String USER_LIB_PATH="bar"; final String USER_SHELL="shell"; JobConf jobConf=new JobConf(); jobConf.set(MRJobConfig.MR_AM_ADMIN_USER_ENV,"LD_LIBRARY_PATH=" + ADMIN_LIB_PATH); jobConf.set(MRJobConfig.MR_AM_ENV,"LD_LIBRARY_PATH=" + USER_LIB_PATH); jobConf.set(MRJobConfig.MAPRED_ADMIN_USER_SHELL,USER_SHELL); YARNRunner yarnRunner=new YARNRunner(jobConf); ApplicationSubmissionContext appSubCtx=buildSubmitContext(yarnRunner,jobConf); ContainerLaunchContext clc=appSubCtx.getAMContainerSpec(); Map env=clc.getEnvironment(); String libPath=env.get(Environment.LD_LIBRARY_PATH.name()); assertNotNull("LD_LIBRARY_PATH not set",libPath); String cps=jobConf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM) ? ApplicationConstants.CLASS_PATH_SEPARATOR : File.pathSeparator; assertEquals("Bad AM LD_LIBRARY_PATH setting",MRApps.crossPlatformifyMREnv(conf,Environment.PWD) + cps + ADMIN_LIB_PATH+ cps+ USER_LIB_PATH,libPath); String shell=env.get(Environment.SHELL.name()); assertNotNull("SHELL not set",shell); assertEquals("Bad SHELL setting",USER_SHELL,shell); }

Class: org.apache.hadoop.mapred.gridmix.TestCompressionEmulationUtils

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
// Two passes: first with compression disabled (checks codec/type strings still copy over
// and input-compression emulation stays off), then with compression enabled plus a ".gz"
// input path (checks input-compression emulation switches on). Target is recreated
// between passes so the second run starts from a clean JobConf.
/** * Test if {@link CompressionEmulationUtil#configureCompressionEmulation(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.mapred.JobConf)}can extract compression related configuration parameters. */ @Test public void testExtractCompressionConfigs(){ JobConf source=new JobConf(); JobConf target=new JobConf(); source.setBoolean(FileOutputFormat.COMPRESS,false); source.set(FileOutputFormat.COMPRESS_CODEC,"MyDefaultCodec"); source.set(FileOutputFormat.COMPRESS_TYPE,"MyDefaultType"); source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false); source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyDefaultCodec2"); CompressionEmulationUtil.configureCompressionEmulation(source,target); assertFalse(target.getBoolean(FileOutputFormat.COMPRESS,true)); assertEquals("MyDefaultCodec",target.get(FileOutputFormat.COMPRESS_CODEC)); assertEquals("MyDefaultType",target.get(FileOutputFormat.COMPRESS_TYPE)); assertFalse(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true)); assertEquals("MyDefaultCodec2",target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC)); assertFalse(CompressionEmulationUtil.isInputCompressionEmulationEnabled(target)); source.setBoolean(FileOutputFormat.COMPRESS,true); source.set(FileOutputFormat.COMPRESS_CODEC,"MyCodec"); source.set(FileOutputFormat.COMPRESS_TYPE,"MyType"); source.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,true); source.set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC,"MyCodec2"); org.apache.hadoop.mapred.FileInputFormat.setInputPaths(source,"file.gz"); target=new JobConf(); CompressionEmulationUtil.configureCompressionEmulation(source,target); assertTrue(target.getBoolean(FileOutputFormat.COMPRESS,false)); assertEquals("MyCodec",target.get(FileOutputFormat.COMPRESS_CODEC)); assertEquals("MyType",target.get(FileOutputFormat.COMPRESS_TYPE)); assertTrue(target.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS,false)); assertEquals("MyCodec2",target.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC)); 
assertTrue(CompressionEmulationUtil.isInputCompressionEmulationEnabled(target)); }

APIUtilityVerifier EqualityVerifier 
/** * Tests the computation logic of uncompressed input bytes by{@link LoadJob#getUncompressedInputBytes(long,Configuration)} */ @Test public void testComputeUncompressedInputBytes(){ long possiblyCompressedInputBytes=100000; float compressionRatio=0.45F; Configuration conf=new Configuration(); CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf,compressionRatio); long result=CompressionEmulationUtil.getUncompressedInputBytes(possiblyCompressedInputBytes,conf); assertEquals(possiblyCompressedInputBytes,result); CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true); result=CompressionEmulationUtil.getUncompressedInputBytes(possiblyCompressedInputBytes,conf); assertEquals((long)(possiblyCompressedInputBytes / compressionRatio),result); }

APIUtilityVerifier EqualityVerifier 
/** * Test of {@link FileQueue} can identify compressed file and provide * readers to extract uncompressed data only if input-compression is enabled. */ @Test public void testFileQueueDecompression() throws IOException { JobConf conf=new JobConf(); FileSystem lfs=FileSystem.getLocal(conf); String inputLine="Hi Hello!"; CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true); org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf,true); org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf,GzipCodec.class); Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory()); Path tempDir=new Path(rootTempDir,"TestFileQueueDecompression"); lfs.delete(tempDir,true); Path compressedFile=new Path(tempDir,"test"); OutputStream out=CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile,conf); BufferedWriter writer=new BufferedWriter(new OutputStreamWriter(out)); writer.write(inputLine); writer.close(); compressedFile=compressedFile.suffix(".gz"); long fileSize=lfs.listStatus(compressedFile)[0].getLen(); CombineFileSplit split=new CombineFileSplit(new Path[]{compressedFile},new long[]{fileSize}); FileQueue queue=new FileQueue(split,conf); byte[] bytes=new byte[inputLine.getBytes().length]; queue.read(bytes); queue.close(); String readLine=new String(bytes); assertEquals("Compression/Decompression error",inputLine,readLine); }

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/** * Test {@link CompressionEmulationUtil#getPossiblyDecompressedInputStream(Path,Configuration,long)}and{@link CompressionEmulationUtil#getPossiblyCompressedOutputStream(Path,Configuration)}. */ @Test public void testPossiblyCompressedDecompressedStreams() throws IOException { JobConf conf=new JobConf(); FileSystem lfs=FileSystem.getLocal(conf); String inputLine="Hi Hello!"; CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true); conf.setBoolean(FileOutputFormat.COMPRESS,true); conf.setClass(FileOutputFormat.COMPRESS_CODEC,GzipCodec.class,CompressionCodec.class); Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory()); Path tempDir=new Path(rootTempDir,"TestPossiblyCompressedDecompressedStreams"); lfs.delete(tempDir,true); Path compressedFile=new Path(tempDir,"test"); OutputStream out=CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile,conf); BufferedWriter writer=new BufferedWriter(new OutputStreamWriter(out)); writer.write(inputLine); writer.close(); compressedFile=compressedFile.suffix(".gz"); InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(compressedFile,conf,0); BufferedReader reader=new BufferedReader(new InputStreamReader(in)); String readLine=reader.readLine(); assertEquals("Compression/Decompression error",inputLine,readLine); reader.close(); }

EqualityVerifier 
/** * Test reduce output compression ratio configuration utilities. */ @Test public void testOutputCompressionRatioConfiguration() throws Exception { Configuration conf=new Configuration(); float ratio=0.567F; CompressionEmulationUtil.setJobOutputCompressionEmulationRatio(conf,ratio); assertEquals(ratio,CompressionEmulationUtil.getJobOutputCompressionEmulationRatio(conf),0.0D); }

EqualityVerifier 
/** * Test compression ratio standardization. */ @Test public void testCompressionRatioStandardization() throws Exception { assertEquals(0.55F,CompressionEmulationUtil.standardizeCompressionRatio(0.55F),0.0D); assertEquals(0.65F,CompressionEmulationUtil.standardizeCompressionRatio(0.652F),0.0D); assertEquals(0.78F,CompressionEmulationUtil.standardizeCompressionRatio(0.777F),0.0D); assertEquals(0.86F,CompressionEmulationUtil.standardizeCompressionRatio(0.855F),0.0D); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test compressible {@link GridmixRecord}. */ @Test public void testCompressibleGridmixRecord() throws IOException { JobConf conf=new JobConf(); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf,true); FileSystem lfs=FileSystem.getLocal(conf); int dataSize=1024 * 1024 * 10; float ratio=0.357F; Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")).makeQualified(lfs.getUri(),lfs.getWorkingDirectory()); Path tempDir=new Path(rootTempDir,"TestPossiblyCompressibleGridmixRecord"); lfs.delete(tempDir,true); GridmixRecord record=new GridmixRecord(dataSize,0); record.setCompressibility(true,ratio); conf.setClass(FileOutputFormat.COMPRESS_CODEC,GzipCodec.class,CompressionCodec.class); org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf,true); Path recordFile=new Path(tempDir,"record"); OutputStream outStream=CompressionEmulationUtil.getPossiblyCompressedOutputStream(recordFile,conf); DataOutputStream out=new DataOutputStream(outStream); record.write(out); out.close(); outStream.close(); Path actualRecordFile=recordFile.suffix(".gz"); InputStream in=CompressionEmulationUtil.getPossiblyDecompressedInputStream(actualRecordFile,conf,0); long compressedFileSize=lfs.listStatus(actualRecordFile)[0].getLen(); GridmixRecord recordRead=new GridmixRecord(); recordRead.readFields(new DataInputStream(in)); assertEquals("Record size mismatch in a compressible GridmixRecord",dataSize,recordRead.getSize()); assertTrue("Failed to generate a compressible GridmixRecord",recordRead.getSize() > compressedFileSize); float seenRatio=((float)compressedFileSize) / dataSize; assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio),CompressionEmulationUtil.standardizeCompressionRatio(seenRatio),1.0D); }

EqualityVerifier 
/** * Test map output compression ratio configuration utilities. */ @Test public void testIntermediateCompressionRatioConfiguration() throws Exception { Configuration conf=new Configuration(); float ratio=0.567F; CompressionEmulationUtil.setMapOutputCompressionEmulationRatio(conf,ratio); assertEquals(ratio,CompressionEmulationUtil.getMapOutputCompressionEmulationRatio(conf),0.0D); }

EqualityVerifier 
/** * Test map input compression ratio configuration utilities. */ @Test public void testInputCompressionRatioConfiguration() throws Exception { Configuration conf=new Configuration(); float ratio=0.567F; CompressionEmulationUtil.setMapInputCompressionEmulationRatio(conf,ratio); assertEquals(ratio,CompressionEmulationUtil.getMapInputCompressionEmulationRatio(conf),0.0D); }

Class: org.apache.hadoop.mapred.gridmix.TestDistCacheEmulation

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Validate GenerateDistCacheData job if it creates dist cache files properly. * @throws Exception */ @Test(timeout=200000) public void testGenerateDistCacheData() throws Exception { long[] sortedFileSizes=new long[5]; Configuration jobConf=runSetupGenerateDistCacheData(true,sortedFileSizes); GridmixJob gridmixJob=new GenerateDistCacheData(jobConf); Job job=gridmixJob.call(); assertEquals("Number of reduce tasks in GenerateDistCacheData is not 0.",0,job.getNumReduceTasks()); assertTrue("GenerateDistCacheData job failed.",job.waitForCompletion(false)); validateDistCacheData(jobConf,sortedFileSizes); }

InternalCallVerifier EqualityVerifier 
/** * test method configureDistCacheFiles */ @Test(timeout=2000) public void testDistCacheEmulator() throws Exception { Configuration conf=new Configuration(); configureDummyDistCacheFiles(conf); File ws=new File("target" + File.separator + this.getClass().getName()); Path ioPath=new Path(ws.getAbsolutePath()); DistributedCacheEmulator dce=new DistributedCacheEmulator(conf,ioPath); JobConf jobConf=new JobConf(conf); jobConf.setUser(UserGroupInformation.getCurrentUser().getShortUserName()); File fin=new File("src" + File.separator + "test"+ File.separator+ "resources"+ File.separator+ "data"+ File.separator+ "wordcount.json"); dce.init(fin.getAbsolutePath(),JobCreator.LOADJOB,true); dce.configureDistCacheFiles(conf,jobConf); String[] caches=conf.getStrings(MRJobConfig.CACHE_FILES); String[] tmpfiles=conf.getStrings("tmpfiles"); assertEquals(6,((caches == null ? 0 : caches.length) + (tmpfiles == null ? 0 : tmpfiles.length))); }

Class: org.apache.hadoop.mapred.gridmix.TestFilePool

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
@Test public void testPool() throws Exception { final Random r=new Random(); final Configuration conf=new Configuration(); conf.setLong(FilePool.GRIDMIX_MIN_FILE,3 * 1024); final FilePool pool=new FilePool(conf,base); pool.refresh(); final ArrayList files=new ArrayList(); final int expectedPoolSize=(NFILES / 2 * (NFILES / 2 + 1) - 6) * 1024; assertEquals(expectedPoolSize,pool.getInputFiles(Long.MAX_VALUE,files)); assertEquals(NFILES - 4,files.size()); files.clear(); assertEquals(expectedPoolSize,pool.getInputFiles(expectedPoolSize,files)); files.clear(); final long rand=r.nextInt(expectedPoolSize); assertTrue("Missed: " + rand,(NFILES / 2) * 1024 > rand - pool.getInputFiles(rand,files)); conf.setLong(FilePool.GRIDMIX_MIN_FILE,0); pool.refresh(); files.clear(); assertEquals((NFILES / 2 * (NFILES / 2 + 1)) * 1024,pool.getInputFiles(Long.MAX_VALUE,files)); }

Class: org.apache.hadoop.mapred.gridmix.TestFileQueue

APIUtilityVerifier EqualityVerifier 
@Test public void testRepeat() throws Exception { final Configuration conf=new Configuration(); Arrays.fill(loc,""); Arrays.fill(start,0L); Arrays.fill(len,BLOCK); final ByteArrayOutputStream out=fillVerif(); final FileQueue q=new FileQueue(new CombineFileSplit(paths,start,len,loc),conf); final byte[] verif=out.toByteArray(); final byte[] check=new byte[2 * NFILES * BLOCK]; q.read(check,0,NFILES * BLOCK); assertArrayEquals(verif,Arrays.copyOf(check,NFILES * BLOCK)); final byte[] verif2=new byte[2 * NFILES * BLOCK]; System.arraycopy(verif,0,verif2,0,verif.length); System.arraycopy(verif,0,verif2,verif.length,verif.length); q.read(check,0,2 * NFILES * BLOCK); assertArrayEquals(verif2,check); }

APIUtilityVerifier EqualityVerifier 
@Test public void testUneven() throws Exception { final Configuration conf=new Configuration(); Arrays.fill(loc,""); Arrays.fill(start,0L); Arrays.fill(len,BLOCK); final int B2=BLOCK / 2; for (int i=0; i < NFILES; i+=2) { start[i]+=B2; len[i]-=B2; } final FileQueue q=new FileQueue(new CombineFileSplit(paths,start,len,loc),conf); final ByteArrayOutputStream out=fillVerif(); final byte[] verif=out.toByteArray(); final byte[] check=new byte[NFILES / 2 * BLOCK + NFILES / 2 * B2]; q.read(check,0,verif.length); assertArrayEquals(verif,Arrays.copyOf(check,verif.length)); q.read(check,0,verif.length); assertArrayEquals(verif,Arrays.copyOf(check,verif.length)); }

Class: org.apache.hadoop.mapred.gridmix.TestGridMixClasses

APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies SerialJobFactory's reader thread: no jobs may be submitted
 * before the start latch is released, and exactly the two jobs contained
 * in the wordcount2.json trace are submitted once it is.
 */
@Test(timeout = 120000)
public void testSerialReaderThread() throws Exception {
  Configuration conf = new Configuration();
  // Trace file used as the job-story input (contains two jobs).
  File fin = new File("src" + File.separator + "test" + File.separator
      + "resources" + File.separator + "data" + File.separator
      + "wordcount2.json");
  JobStoryProducer jobProducer =
      new ZombieJobProducer(new Path(fin.getAbsolutePath()), null, conf);
  CountDownLatch startFlag = new CountDownLatch(1);
  UserResolver resolver = new SubmitterUserResolver();
  FakeJobSubmitter submitter = new FakeJobSubmitter();
  File ws = new File("target" + File.separator + this.getClass().getName());
  if (!ws.exists()) {
    Assert.assertTrue(ws.mkdirs());
  }
  SerialJobFactory jobFactory = new SerialJobFactory(submitter, jobProducer,
      new Path(ws.getAbsolutePath()), conf, startFlag, resolver);
  Path ioPath = new Path(ws.getAbsolutePath());
  jobFactory.setDistCacheEmulator(new DistributedCacheEmulator(conf, ioPath));
  Thread test = jobFactory.createReaderThread();
  test.start();
  Thread.sleep(1000);
  // Reader must still be blocked on the latch: nothing submitted yet.
  assertEquals(0, submitter.getJobs().size());
  startFlag.countDown();
  // Keep poking the factory until the reader thread drains the trace.
  while (test.isAlive()) {
    Thread.sleep(1000);
    jobFactory.update(null);
  }
  // wordcount2.json contains two jobs.
  assertEquals(2, submitter.getJobs().size());
}

EqualityVerifier 
/**
 * Drains a ReadRecordFactory backed by a FakeRecordFactory and a
 * FakeInputStream, then checks the number of bytes consumed from the
 * stream and the delegate's final progress value.
 */
@Test(timeout = 3000)
public void testReadRecordFactory() throws Exception {
  final RecordFactory delegate = new FakeRecordFactory();
  final FakeInputStream source = new FakeInputStream();
  final ReadRecordFactory factory =
      new ReadRecordFactory(delegate, source, new Configuration());
  final GridmixKey key = new GridmixKey(GridmixKey.DATA, 100, 2);
  final GridmixRecord value = new GridmixRecord(200, 2);
  // Pull records until the factory is exhausted; only totals matter.
  while (factory.next(key, value)) {
    // intentionally empty
  }
  assertEquals(3000, source.getCounter());
  assertEquals(-1, delegate.getProgress(), 0.01);
  factory.close();
}

APIUtilityVerifier EqualityVerifier 
/**
 * Runs LoadJob.LoadMapper end-to-end against fake reader/writer contexts
 * with compression emulation enabled and verifies the number of records
 * collected by the writer (2, matching NUM_REDUCES).
 */
@SuppressWarnings({"rawtypes", "unchecked"})
@Test(timeout = 10000)
public void testLoadMapper() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();
  RecordReader reader = new FakeRecordReader();
  LoadRecordGkGrWriter writer = new LoadRecordGkGrWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
  LoadSplit split = getLoadSplit();
  MapContext mapContext = new MapContextImpl(conf, taskId, reader, writer,
      committer, reporter, split);
  Context ctx = new WrappedMapper().getMapContext(mapContext);
  reader.initialize(split, ctx);
  // Re-apply compression settings on the context's own Configuration copy.
  ctx.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(
      ctx.getConfiguration(), true);
  LoadJob.LoadMapper mapper = new LoadJob.LoadMapper();
  mapper.run(ctx);
  Map data = writer.getData();
  // Two entries collected — matches NUM_REDUCES configured above.
  assertEquals(2, data.size());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises LoadJob.LoadSortComparator on raw serialized bytes: identical
 * payloads compare equal, and mutating a single byte changes the sign or
 * magnitude of the comparison result.
 */
@Test(timeout = 3000)
public void testLoadJobLoadSortComparator() throws Exception {
  final LoadJob.LoadSortComparator comparator =
      new LoadJob.LoadSortComparator();
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  final DataOutputStream stream = new DataOutputStream(buffer);
  // Shared VInt payload backing both byte arrays.
  for (int v : new int[] {2, 1, 4, 7, 4}) {
    WritableUtils.writeVInt(stream, v);
  }
  final byte[] first = buffer.toByteArray();
  final byte[] second = buffer.toByteArray();
  // Identical contents compare as equal.
  assertEquals(0, comparator.compare(first, 0, 1, second, 0, 1));
  second[2] = 5;
  assertEquals(-1, comparator.compare(first, 0, 1, second, 0, 1));
  second[2] = 2;
  assertEquals(2, comparator.compare(first, 0, 1, second, 0, 1));
  second[2] = 4;
  // Offset shift on the second buffer.
  assertEquals(1, comparator.compare(first, 0, 1, second, 1, 1));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies SleepReducer: setup() sleeps for the duration taken from the
 * current key's reduce output bytes and publishes a "Sleeping..." status,
 * and cleanup() publishes the final "Slept for" status.
 */
@Test(timeout = 3000)
public void testSleepReducer() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();
  RawKeyValueIterator input = new FakeRawKeyValueReducerIterator();
  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  RecordWriter output = new LoadRecordReduceWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new DummyReporter();
  RawComparator comparator = new FakeRawComparator();
  ReduceContext reducecontext = new ReduceContextImpl(conf, taskId, input,
      counter, inputValueCounter, output, committer, reporter, comparator,
      GridmixKey.class, NullWritable.class);
  org.apache.hadoop.mapreduce.Reducer.Context context =
      new WrappedReducer().getReducerContext(reducecontext);
  SleepReducer test = new SleepReducer();
  long start = System.currentTimeMillis();
  test.setup(context);
  // Sleep duration is taken from the key's reduce output bytes.
  long sleeper = context.getCurrentKey().getReduceOutputBytes();
  assertEquals("Sleeping... " + sleeper + " ms left", context.getStatus());
  // setup() must have blocked for at least the requested duration.
  assertTrue(System.currentTimeMillis() >= (start + sleeper));
  test.cleanup(context);
  assertEquals("Slept for " + sleeper, context.getStatus());
}

IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
/**
 * Drives LoadJob.LoadRecordReader over a two-part combine split backed by
 * mocked file systems, checking progress after each record (0.5 then 1.0),
 * the per-record size (1000), and the total record count (2).
 */
@Test(timeout = 3000)
public void testLoadJobLoadRecordReader() throws Exception {
  LoadJob.LoadRecordReader test = new LoadJob.LoadRecordReader();
  Configuration conf = new Configuration();
  // Two mocked paths, each resolving to a file system serving fake data.
  FileSystem fs1 = mock(FileSystem.class);
  when(fs1.open((Path) anyObject()))
      .thenReturn(new FakeFSDataInputStream(new FakeInputStream()));
  Path p1 = mock(Path.class);
  when(p1.getFileSystem((JobConf) anyObject())).thenReturn(fs1);
  FileSystem fs2 = mock(FileSystem.class);
  when(fs2.open((Path) anyObject()))
      .thenReturn(new FakeFSDataInputStream(new FakeInputStream()));
  Path p2 = mock(Path.class);
  when(p2.getFileSystem((JobConf) anyObject())).thenReturn(fs2);
  Path[] paths = {p1, p2};
  long[] start = {0, 0};
  long[] lengths = {1000, 1000};
  String[] locations = {"temp1", "temp2"};
  CombineFileSplit cfsplit =
      new CombineFileSplit(paths, start, lengths, locations);
  double[] reduceBytes = {100, 100};
  double[] reduceRecords = {2, 2};
  long[] reduceOutputBytes = {500, 500};
  long[] reduceOutputRecords = {2, 2};
  ResourceUsageMetrics metrics = new ResourceUsageMetrics();
  ResourceUsageMetrics[] rMetrics = {new ResourceUsageMetrics(),
      new ResourceUsageMetrics()};
  LoadSplit input = new LoadSplit(cfsplit, 2, 3, 1500L, 2L, 3000L, 2L,
      reduceBytes, reduceRecords, reduceOutputBytes, reduceOutputRecords,
      metrics, rMetrics);
  TaskAttemptID taskId = new TaskAttemptID();
  TaskAttemptContext ctx = new TaskAttemptContextImpl(conf, taskId);
  test.initialize(input, ctx);
  GridmixRecord gr = test.getCurrentValue();
  int counter = 0;
  while (test.nextKeyValue()) {
    gr = test.getCurrentValue();
    // Progress advances by one half per 1000-byte record.
    if (counter == 0) {
      assertEquals(0.5, test.getProgress(), 0.001);
    } else if (counter == 1) {
      assertEquals(1.0, test.getProgress(), 0.001);
    }
    assertEquals(1000, gr.getSize());
    counter++;
  }
  assertEquals(1000, gr.getSize());
  // One record per split part.
  assertEquals(2, counter);
  test.close();
}

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a GridmixSplit through its Writable serialization
 * (write()/readFields()) and verifies every accessor survives the cycle.
 *
 * CLEANUP (review): the original built a ResourceUsageMetrics local and
 * set its CPU usage, but never passed it to the GridmixSplit constructor —
 * dead code with no observable effect, removed.
 */
@Test(timeout = 1000)
public void testGridmixSplit() throws Exception {
  final Path[] files = {new Path("one"), new Path("two")};
  final long[] start = {1, 2};
  final long[] lengths = {100, 200};
  final String[] locations = {"locOne", "loctwo"};
  final CombineFileSplit cfSplit =
      new CombineFileSplit(files, start, lengths, locations);
  final double[] reduceBytes = {8.1d, 8.2d};
  final double[] reduceRecords = {9.1d, 9.2d};
  final long[] reduceOutputBytes = {101L, 102L};
  final long[] reduceOutputRecords = {111L, 112L};
  final GridmixSplit test = new GridmixSplit(cfSplit, 2, 3, 4L, 5L, 6L, 7L,
      reduceBytes, reduceRecords, reduceOutputBytes, reduceOutputRecords);
  // Serialize into an in-memory buffer, then restore into a fresh instance.
  final ByteArrayOutputStream data = new ByteArrayOutputStream();
  final DataOutputStream out = new DataOutputStream(data);
  test.write(out);
  final GridmixSplit copy = new GridmixSplit();
  copy.readFields(
      new DataInputStream(new ByteArrayInputStream(data.toByteArray())));
  assertEquals(test.getId(), copy.getId());
  assertEquals(test.getMapCount(), copy.getMapCount());
  assertEquals(test.getInputRecords(), copy.getInputRecords());
  assertEquals(test.getOutputBytes()[0], copy.getOutputBytes()[0]);
  assertEquals(test.getOutputRecords()[0], copy.getOutputRecords()[0]);
  assertEquals(test.getReduceBytes(0), copy.getReduceBytes(0));
  assertEquals(test.getReduceRecords(0), copy.getReduceRecords(0));
}

InternalCallVerifier EqualityVerifier 
/**
 * Round-trips a LoadSplit through write()/readFields() and verifies the
 * restored copy preserves every field, including map- and reduce-level
 * resource usage metrics.
 */
@Test(timeout = 1000)
public void testLoadSplit() throws Exception {
  final LoadSplit original = getLoadSplit();
  // Serialize into an in-memory buffer.
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  original.write(new DataOutputStream(buffer));
  // Deserialize into a fresh instance.
  final LoadSplit restored = new LoadSplit();
  restored.readFields(
      new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
  assertEquals(original.getId(), restored.getId());
  assertEquals(original.getMapCount(), restored.getMapCount());
  assertEquals(original.getInputRecords(), restored.getInputRecords());
  assertEquals(original.getOutputBytes()[0], restored.getOutputBytes()[0]);
  assertEquals(original.getOutputRecords()[0], restored.getOutputRecords()[0]);
  assertEquals(original.getReduceBytes(0), restored.getReduceBytes(0));
  assertEquals(original.getReduceRecords(0), restored.getReduceRecords(0));
  assertEquals(original.getMapResourceUsageMetrics().getCumulativeCpuUsage(),
      restored.getMapResourceUsageMetrics().getCumulativeCpuUsage());
  assertEquals(
      original.getReduceResourceUsageMetrics(0).getCumulativeCpuUsage(),
      restored.getReduceResourceUsageMetrics(0).getCumulativeCpuUsage());
}

APIUtilityVerifier EqualityVerifier 
/**
 * Runs LoadJob.LoadReducer over a fake raw key/value iterator and checks
 * the consumed key/value counters and the single emitted record's size.
 */
@Test(timeout = 3000)
public void testLoadJobLoadReducer() throws Exception {
  LoadJob.LoadReducer test = new LoadJob.LoadReducer();
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskid = new TaskAttemptID();
  RawKeyValueIterator input = new FakeRawKeyValueIterator();
  Counter counter = new GenericCounter();
  Counter inputValueCounter = new GenericCounter();
  LoadRecordWriter output = new LoadRecordWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new DummyReporter();
  RawComparator comparator = new FakeRawComparator();
  ReduceContext reduceContext = new ReduceContextImpl(conf, taskid, input,
      counter, inputValueCounter, output, committer, reporter, comparator,
      GridmixKey.class, GridmixRecord.class);
  // Prime the context so the reducer sees the first key/value pair.
  reduceContext.nextKeyValue();
  org.apache.hadoop.mapreduce.Reducer.Context context =
      new WrappedReducer().getReducerContext(reduceContext);
  test.run(context);
  // Counters reflect how much of the fake input was consumed.
  assertEquals(9, counter.getValue());
  assertEquals(10, inputValueCounter.getValue());
  assertEquals(1, output.getData().size());
  GridmixRecord record = output.getData().values().iterator().next();
  // NOTE(review): 1593 is the observed emitted size for this fixture —
  // tied to FakeRawKeyValueIterator's contents.
  assertEquals(1593, record.getSize());
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Exercises GridmixJob.SpecGroupingComparator on raw serialized bytes and
 * on GridmixKey instances: equal payloads group as 0, REDUCE_SPEC orders
 * before DATA, and differing partitions yield a positive result.
 */
@Test(timeout = 3000)
public void testGridmixJobSpecGroupingComparator() throws Exception {
  GridmixJob.SpecGroupingComparator test =
      new GridmixJob.SpecGroupingComparator();
  ByteArrayOutputStream data = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(data);
  WritableUtils.writeVInt(dos, 2);
  WritableUtils.writeVInt(dos, 1);
  WritableUtils.writeVInt(dos, 0);
  WritableUtils.writeVInt(dos, 7);
  WritableUtils.writeVInt(dos, 4);
  byte[] b1 = data.toByteArray();
  byte[] b2 = data.toByteArray();
  // Identical serialized payloads group as equal.
  assertEquals(0, test.compare(b1, 0, 1, b2, 0, 1));
  b2[2] = 1;
  assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
  // NOTE(review): the following assignment/assert pair duplicates the one
  // above verbatim — looks like an editing artifact; confirm upstream.
  b2[2] = 1;
  assertEquals(-1, test.compare(b1, 0, 1, b2, 0, 1));
  // Object-level comparisons on GridmixKey instances.
  assertEquals(0, test.compare(new GridmixKey(GridmixKey.DATA, 100, 2),
      new GridmixKey(GridmixKey.DATA, 100, 2)));
  assertEquals(-1, test.compare(new GridmixKey(GridmixKey.REDUCE_SPEC, 100, 2),
      new GridmixKey(GridmixKey.DATA, 100, 2)));
  assertEquals(1, test.compare(new GridmixKey(GridmixKey.DATA, 100, 2),
      new GridmixKey(GridmixKey.REDUCE_SPEC, 100, 2)));
  assertEquals(2, test.compare(new GridmixKey(GridmixKey.DATA, 102, 2),
      new GridmixKey(GridmixKey.DATA, 100, 2)));
}

InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies GridmixJob equality and ordering: jobs built with identical
 * (submission time, job story, seq) compare equal; a differing seq breaks
 * equality and orders the jobs.
 * NOTE(review): asserting compareTo() == -1 exactly (rather than &lt; 0)
 * depends on GridmixJob's implementation — confirm before relying on it.
 */
@Test(timeout = 30000)
public void testCompareGridmixJob() throws Exception {
  Configuration conf = new Configuration();
  Path outRoot = new Path("target");
  JobStory jobDesc = mock(JobStory.class);
  when(jobDesc.getName()).thenReturn("JobName");
  when(jobDesc.getJobConf()).thenReturn(new JobConf(conf));
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  // j1/j2 share seq 0; j3/j4 share seq 1.
  GridmixJob j1 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
  GridmixJob j2 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 0);
  GridmixJob j3 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);
  GridmixJob j4 = new LoadJob(conf, 1000L, jobDesc, outRoot, ugi, 1);
  assertTrue(j1.equals(j2));
  assertEquals(0, j1.compareTo(j2));
  assertFalse(j1.equals(j3));
  assertEquals(-1, j1.compareTo(j3));
  assertEquals(-1, j1.compareTo(j4));
}

BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Verifies SleepJob.SleepMapper: map() must block until the absolute
 * wake-up time carried by the key (start + 2000 ms) has passed, and
 * cleanup() emits exactly one record.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Test(timeout = 30000)
public void testSleepMapper() throws Exception {
  SleepJob.SleepMapper test = new SleepJob.SleepMapper();
  Configuration conf = new Configuration();
  conf.setInt(JobContext.NUM_REDUCES, 2);
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  TaskAttemptID taskId = new TaskAttemptID();
  FakeRecordLLReader reader = new FakeRecordLLReader();
  LoadRecordGkNullWriter writer = new LoadRecordGkNullWriter();
  OutputCommitter committer = new CustomOutputCommitter();
  StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
  SleepSplit split = getSleepSplit();
  MapContext mapcontext = new MapContextImpl(conf, taskId, reader, writer,
      committer, reporter, split);
  Context context = new WrappedMapper().getMapContext(mapcontext);
  long start = System.currentTimeMillis();
  LOG.info("start:" + start);
  // Key/value encode an absolute wake-up time 2 seconds in the future.
  LongWritable key = new LongWritable(start + 2000);
  LongWritable value = new LongWritable(start + 2000);
  test.map(key, value, context);
  LOG.info("finish:" + System.currentTimeMillis());
  // map() must not return before the encoded wake-up time.
  assertTrue(System.currentTimeMillis() >= (start + 2000));
  test.cleanup(context);
  assertEquals(1, writer.getData().size());
}

Class: org.apache.hadoop.mapred.gridmix.TestGridmixMemoryEmulation

InternalCallVerifier EqualityVerifier 
/**
 * Test disabled task heap options configuration in {@link GridmixJob}.
 * With GRIDMIX_TASK_JVM_OPTIONS_ENABLE set to false, the simulated job
 * must keep the gridmix-configured -Xmx values instead of inheriting the
 * ones from the original (trace) job configuration.
 */
@Test
@SuppressWarnings("deprecation")
public void testJavaHeapOptionsDisabled() throws Exception {
  Configuration gridmixConf = new Configuration();
  gridmixConf.setBoolean(GridmixJob.GRIDMIX_TASK_JVM_OPTIONS_ENABLE, false);
  // Values expected to survive into the simulated job.
  gridmixConf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx1m");
  gridmixConf.set(MRJobConfig.REDUCE_JAVA_OPTS, "-Xmx2m");
  gridmixConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-Xmx3m");
  // Values on the original job that must NOT be copied over.
  final JobConf originalConf = new JobConf();
  originalConf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx10m");
  originalConf.set(MRJobConfig.REDUCE_JAVA_OPTS, "-Xmx20m");
  originalConf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-Xmx30m");
  MockJob story = new MockJob(originalConf) {
    public JobConf getJobConf() {
      return originalConf;
    }
  };
  GridmixJob job = new DummyGridmixJob(gridmixConf, story);
  Job simulatedJob = job.getJob();
  Configuration simulatedConf = simulatedJob.getConfiguration();
  assertEquals("Map heap options works when disabled!", "-Xmx1m",
      simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
  assertEquals("Reduce heap options works when disabled!", "-Xmx2m",
      simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
  assertEquals("Task heap options works when disabled!", "-Xmx3m",
      simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}

InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/**
 * Test {@link TotalHeapUsageEmulatorPlugin}: a plugin initialized without
 * a monitor is a no-op, a target above the available memory fails
 * initialization, and emulation accuracy/boundary behavior holds across
 * several load-ratio, free-ratio and progress-interval configurations.
 */
@Test
public void testTotalHeapUsageEmulatorPlugin() throws Exception {
  Configuration conf = new Configuration();
  ResourceCalculatorPlugin monitor = new DummyResourceCalculatorPlugin();
  // Pretend 1 GB of physical memory is available to the task.
  long maxHeapUsage = 1024 * TotalHeapUsageEmulatorPlugin.ONE_MB;
  conf.setLong(DummyResourceCalculatorPlugin.MAXPMEM_TESTING_PROPERTY,
      maxHeapUsage);
  monitor.setConf(conf);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0F);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 1F);
  long targetHeapUsageInMB = 200;
  FakeProgressive fakeProgress = new FakeProgressive();
  FakeHeapUsageEmulatorCore fakeCore = new FakeHeapUsageEmulatorCore();
  FakeHeapUsageEmulatorPlugin heapPlugin =
      new FakeHeapUsageEmulatorPlugin(fakeCore);
  // Initializing with a null monitor disables the plugin: emulate() must
  // leave the core untouched and report full progress.
  ResourceUsageMetrics invalidUsage =
      TestResourceUsageEmulators.createMetrics(0);
  heapPlugin.initialize(conf, invalidUsage, null, null);
  int numCallsPre = fakeCore.getNumCalls();
  long heapUsagePre = fakeCore.getHeapUsageInMB();
  heapPlugin.emulate();
  int numCallsPost = fakeCore.getNumCalls();
  long heapUsagePost = fakeCore.getHeapUsageInMB();
  assertEquals("Disabled heap usage emulation plugin works!",
      numCallsPre, numCallsPost);
  assertEquals("Disabled heap usage emulation plugin works!",
      heapUsagePre, heapUsagePost);
  float progress = heapPlugin.getProgress();
  assertEquals("Invalid progress of disabled cumulative heap usage emulation "
      + "plugin!", 1.0f, progress, 0f);
  // A target exceeding the available memory must fail initialization.
  Boolean failed = null;
  invalidUsage = TestResourceUsageEmulators.createMetrics(
      maxHeapUsage + TotalHeapUsageEmulatorPlugin.ONE_MB);
  try {
    heapPlugin.initialize(conf, invalidUsage, monitor, null);
    failed = false;
  } catch (Exception e) {
    failed = true;
  }
  assertNotNull("Fail case failure!", failed);
  assertTrue("Expected failure!", failed);
  // Accuracy checks under various settings: last two args are the expected
  // heap usage in MB and the expected number of core invocations.
  ResourceUsageMetrics metrics = TestResourceUsageEmulators.createMetrics(
      targetHeapUsageInMB * TotalHeapUsageEmulatorPlugin.ONE_MB);
  testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 200, 10);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,
      0.2F);
  testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 200, 5);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 1F);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0.5F);
  testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 120, 2);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 0.5F);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0F);
  testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 200, 10);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0.25F);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 0.5F);
  testEmulationAccuracy(conf, fakeCore, monitor, metrics, heapPlugin, 162, 6);
  // Boundary checks: with a 25% progress interval, emulation fires only
  // when progress crosses 0.25-wide steps.
  fakeProgress = new FakeProgressive();
  conf.setFloat(TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO, 0F);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_LOAD_RATIO, 1F);
  conf.setFloat(TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL,
      0.25F);
  heapPlugin.initialize(conf, metrics, monitor, fakeProgress);
  fakeCore.resetFake();
  long initHeapUsage = fakeCore.getHeapUsageInMB();
  long initNumCallsUsage = fakeCore.getNumCalls();
  testEmulationBoundary(0F, fakeCore, fakeProgress, heapPlugin, initHeapUsage,
      initNumCallsUsage, "[no-op, 0 progress]");
  testEmulationBoundary(0.24F, fakeCore, fakeProgress, heapPlugin,
      initHeapUsage, initNumCallsUsage, "[no-op, 24% progress]");
  testEmulationBoundary(0.25F, fakeCore, fakeProgress, heapPlugin,
      targetHeapUsageInMB / 4, 1, "[op, 25% progress]");
  testEmulationBoundary(0.80F, fakeCore, fakeProgress, heapPlugin,
      (targetHeapUsageInMB * 4) / 5, 2, "[op, 80% progress]");
  testEmulationBoundary(1F, fakeCore, fakeProgress, heapPlugin,
      targetHeapUsageInMB, 3, "[op, 100% progress]");
}

InternalCallVerifier EqualityVerifier 
/**
 * Test {@link TotalHeapUsageEmulatorPlugin}'s core heap usage emulation
 * engine: load() raises the reported heap usage by the requested amount,
 * and resetFake() brings it back to zero.
 */
@Test
public void testHeapUsageEmulator() throws IOException {
  final FakeHeapUsageEmulatorCore heapEmulator =
      new FakeHeapUsageEmulatorCore();
  final long testSizeInMB = 10;
  final long before = heapEmulator.getHeapUsageInMB();
  heapEmulator.load(testSizeInMB);
  assertEquals("Default heap emulator failed to load 10mb",
      before + testSizeInMB, heapEmulator.getHeapUsageInMB());
  // A reset must zero the reported usage.
  heapEmulator.resetFake();
  assertEquals("Default heap emulator failed to reset",
      0, heapEmulator.getHeapUsageInMB());
}

Class: org.apache.hadoop.mapred.gridmix.TestGridmixSubmission

APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/**
 * Runs DebugGridmix.main with no arguments and verifies the usage message
 * is printed to (the redirected) stderr and the process attempts to exit.
 *
 * BUG FIX (review): the original saved System.out into "oldOut" but then
 * restored that stream into System.err, leaving stderr permanently bound
 * to the old stdout after the test. We now save and restore System.err.
 */
@Test(timeout = 100000)
public void testMain() throws Exception {
  SecurityManager securityManager = System.getSecurityManager();
  final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  final PrintStream out = new PrintStream(bytes);
  // Save the stream we are about to replace so it can be restored.
  final PrintStream oldErr = System.err;
  System.setErr(out);
  // Turn System.exit() into a catchable ExitException.
  ExitUtil.disableSystemExit();
  try {
    String[] argv = new String[0];
    DebugGridmix.main(argv);
  } catch (ExitUtil.ExitException e) {
    assertEquals("ExitException", e.getMessage());
    ExitUtil.resetFirstExitException();
  } finally {
    System.setErr(oldErr);
    System.setSecurityManager(securityManager);
  }
  String print = bytes.toString();
  // Usage text is emitted on the redirected stderr stream.
  assertTrue(print.contains(
      "Usage: gridmix [-generate ] [-users URI] [-Dname=value ...] "));
  assertTrue(print.contains("e.g. gridmix -generate 100m foo -"));
}

Class: org.apache.hadoop.mapred.gridmix.TestGridmixSummary

InternalCallVerifier EqualityVerifier 
/**
 * Test {@link ClusterSummarizer}: start() must record the JT/NN addresses
 * from the configuration, and update() must capture cluster metrics taken
 * from a local-runner JobClient.
 */
@Test
public void testClusterSummarizer() throws IOException {
  ClusterSummarizer cs = new ClusterSummarizer();
  Configuration conf = new Configuration();
  String jt = "test-jt:1234";
  String nn = "test-nn:5678";
  conf.set(JTConfig.JT_IPC_ADDRESS, jt);
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, nn);
  cs.start(conf);
  assertEquals("JT name mismatch", jt, cs.getJobTrackerInfo());
  assertEquals("NN name mismatch", nn, cs.getNamenodeInfo());
  ClusterStats cStats = ClusterStats.getClusterStats();
  // Switch to the local runner to obtain single-node cluster status.
  conf.set(JTConfig.JT_IPC_ADDRESS, "local");
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "local");
  JobClient jc = new JobClient(conf);
  cStats.setClusterMetric(jc.getClusterStatus());
  cs.update(cStats);
  // Local runner exposes one map slot, one reduce slot, one tracker.
  assertEquals("Cluster summary test failed!", 1, cs.getMaxMapTasks());
  assertEquals("Cluster summary test failed!", 1, cs.getMaxReduceTasks());
  assertEquals("Cluster summary test failed!", 1, cs.getNumActiveTrackers());
  assertEquals("Cluster summary test failed!", 0,
      cs.getNumBlacklistedTrackers());
}

APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
/** * Test {@link DataStatistics}. */ @Test public void testDataStatistics() throws Exception { DataStatistics stats=new DataStatistics(10,2,true); assertEquals("Data size mismatch",10,stats.getDataSize()); assertEquals("Num files mismatch",2,stats.getNumFiles()); assertTrue("Compression configuration mismatch",stats.isDataCompressed()); stats=new DataStatistics(100,5,false); assertEquals("Data size mismatch",100,stats.getDataSize()); assertEquals("Num files mismatch",5,stats.getNumFiles()); assertFalse("Compression configuration mismatch",stats.isDataCompressed()); Configuration conf=new Configuration(); Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")); Path testDir=new Path(rootTempDir,"testDataStatistics"); FileSystem fs=testDir.getFileSystem(conf); fs.delete(testDir,true); Path testInputDir=new Path(testDir,"test"); fs.mkdirs(testInputDir); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); Boolean failed=null; try { GenerateData.publishDataStatistics(testInputDir,1024L,conf); failed=false; } catch ( RuntimeException e) { failed=true; } assertNotNull("Expected failure!",failed); assertTrue("Compression data publishing error",failed); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false); stats=GenerateData.publishDataStatistics(testInputDir,1024L,conf); assertEquals("Data size mismatch",0,stats.getDataSize()); assertEquals("Num files mismatch",0,stats.getNumFiles()); assertFalse("Compression configuration mismatch",stats.isDataCompressed()); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false); Path inputDataFile=new Path(testInputDir,"test"); long size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello bye").size(); stats=GenerateData.publishDataStatistics(testInputDir,-1,conf); assertEquals("Data size mismatch",size,stats.getDataSize()); assertEquals("Num files mismatch",1,stats.getNumFiles()); assertFalse("Compression configuration 
mismatch",stats.isDataCompressed()); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); failed=null; try { GenerateData.publishDataStatistics(testInputDir,1234L,conf); failed=false; } catch ( RuntimeException e) { failed=true; } assertNotNull("Expected failure!",failed); assertTrue("Compression data publishing error",failed); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,false); fs.delete(inputDataFile,false); inputDataFile=new Path(testInputDir,"test.gz"); size=UtilsForTests.createTmpFileDFS(fs,inputDataFile,FsPermission.createImmutable((short)777),"hi hello").size(); stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf); assertEquals("Data size mismatch",size,stats.getDataSize()); assertEquals("Num files mismatch",1,stats.getNumFiles()); assertFalse("Compression configuration mismatch",stats.isDataCompressed()); CompressionEmulationUtil.setCompressionEmulationEnabled(conf,true); stats=GenerateData.publishDataStatistics(testInputDir,1234L,conf); assertEquals("Data size mismatch",size,stats.getDataSize()); assertEquals("Num files mismatch",1,stats.getNumFiles()); assertTrue("Compression configuration mismatch",stats.isDataCompressed()); }

APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
/** * Test {@link ExecutionSummarizer}. */ @Test @SuppressWarnings({"unchecked","rawtypes"}) public void testExecutionSummarizer() throws IOException { Configuration conf=new Configuration(); ExecutionSummarizer es=new ExecutionSummarizer(); assertEquals("ExecutionSummarizer init failed",Summarizer.NA,es.getCommandLineArgsString()); long startTime=System.currentTimeMillis(); String[] initArgs=new String[]{"-Xmx20m","-Dtest.args='test'"}; es=new ExecutionSummarizer(initArgs); assertEquals("ExecutionSummarizer init failed","-Xmx20m -Dtest.args='test'",es.getCommandLineArgsString()); assertTrue("Start time mismatch",es.getStartTime() >= startTime); assertTrue("Start time mismatch",es.getStartTime() <= System.currentTimeMillis()); es.update(null); assertEquals("ExecutionSummarizer init failed",0,es.getSimulationStartTime()); testExecutionSummarizer(0,0,0,0,0,0,0,es); long simStartTime=System.currentTimeMillis(); es.start(null); assertTrue("Simulation start time mismatch",es.getSimulationStartTime() >= simStartTime); assertTrue("Simulation start time mismatch",es.getSimulationStartTime() <= System.currentTimeMillis()); JobStats stats=generateFakeJobStats(1,10,true,false); es.update(stats); testExecutionSummarizer(1,10,0,1,1,0,0,es); stats=generateFakeJobStats(5,1,false,false); es.update(stats); testExecutionSummarizer(6,11,0,2,1,1,0,es); stats=generateFakeJobStats(1,1,true,true); es.update(stats); testExecutionSummarizer(7,12,0,3,1,1,1,es); stats=generateFakeJobStats(2,2,false,true); es.update(stats); testExecutionSummarizer(9,14,0,4,1,1,2,es); JobFactory factory=new FakeJobFactory(conf); factory.numJobsInTrace=3; Path rootTempDir=new Path(System.getProperty("test.build.data","/tmp")); Path testDir=new Path(rootTempDir,"testGridmixSummary"); Path testTraceFile=new Path(testDir,"test-trace.json"); FileSystem fs=FileSystem.getLocal(conf); fs.create(testTraceFile).close(); UserResolver resolver=new RoundRobinUserResolver(); DataStatistics dataStats=new 
DataStatistics(100,2,true); String policy=GridmixJobSubmissionPolicy.REPLAY.name(); conf.set(GridmixJobSubmissionPolicy.JOB_SUBMISSION_POLICY,policy); es.finalize(factory,testTraceFile.toString(),1024L,resolver,dataStats,conf); assertEquals("Mismtach in num jobs in trace",3,es.getNumJobsInTrace()); String tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString()); assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature()); Path qPath=fs.makeQualified(testTraceFile); assertEquals("Mismatch in trace filename",qPath.toString(),es.getInputTraceLocation()); assertEquals("Mismatch in expected data size","1 K",es.getExpectedDataSize()); assertEquals("Mismatch in input data statistics",ExecutionSummarizer.stringifyDataStatistics(dataStats),es.getInputDataStatistics()); assertEquals("Mismatch in user resolver",resolver.getClass().getName(),es.getUserResolver()); assertEquals("Mismatch in policy",policy,es.getJobSubmissionPolicy()); es.finalize(factory,testTraceFile.toString(),1024 * 1024 * 1024* 10L,resolver,dataStats,conf); assertEquals("Mismatch in expected data size","10 G",es.getExpectedDataSize()); fs.delete(testTraceFile,false); try { Thread.sleep(1000); } catch ( InterruptedException ie) { } fs.create(testTraceFile).close(); es.finalize(factory,testTraceFile.toString(),0L,resolver,dataStats,conf); assertEquals("Mismatch in trace data size",Summarizer.NA,es.getExpectedDataSize()); assertFalse("Mismatch in trace signature",tid.equals(es.getInputTraceSignature())); tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString()); assertEquals("Mismatch in trace signature",tid,es.getInputTraceSignature()); testTraceFile=new Path(testDir,"test-trace2.json"); fs.create(testTraceFile).close(); es.finalize(factory,testTraceFile.toString(),0L,resolver,dataStats,conf); assertFalse("Mismatch in trace signature",tid.equals(es.getInputTraceSignature())); tid=ExecutionSummarizer.getTraceSignature(testTraceFile.toString()); assertEquals("Mismatch in 
trace signature",tid,es.getInputTraceSignature()); es.finalize(factory,"-",0L,resolver,dataStats,conf); assertEquals("Mismatch in trace signature",Summarizer.NA,es.getInputTraceSignature()); assertEquals("Mismatch in trace file location",Summarizer.NA,es.getInputTraceLocation()); }

Class: org.apache.hadoop.mapred.gridmix.TestPseudoLocalFs

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
/**
 * Test Pseudo Local File System methods like getFileStatus(), create(),
 * open(), exists() for
 * <ul>
 *   <li>valid file paths and</li>
 *   <li>invalid file paths.</li>
 * </ul>
 * @throws IOException
 */
@Test
public void testPseudoLocalFsFileNames() throws IOException {
  PseudoLocalFs pfs = new PseudoLocalFs();
  Configuration conf = new Configuration();
  conf.setClass("fs.pseudo.impl", PseudoLocalFs.class, FileSystem.class);
  Path path = new Path("pseudo:///myPsedoFile.1234");
  FileSystem testFs = path.getFileSystem(conf);
  assertEquals("Failed to obtain a pseudo local file system object from path",
      pfs.getUri().getScheme(), testFs.getUri().getScheme());
  // Expected invalid: wrong scheme.
  path = new Path("file:///myPsedoFile.12345");
  validateGetFileStatus(pfs, path, false);
  validateCreate(pfs, path, false);
  validateOpen(pfs, path, false);
  validateExists(pfs, path, false);
  // Expected invalid: no size suffix.
  path = new Path("pseudo:///myPsedoFile");
  validateGetFileStatus(pfs, path, false);
  validateCreate(pfs, path, false);
  validateOpen(pfs, path, false);
  validateExists(pfs, path, false);
  // Expected invalid: non-numeric suffix.
  path = new Path("pseudo:///myPsedoFile.txt");
  validateGetFileStatus(pfs, path, false);
  validateCreate(pfs, path, false);
  validateOpen(pfs, path, false);
  validateExists(pfs, path, false);
  // Well-formed generated path: the suffix encodes the file size.
  long fileSize = 231456;
  path = PseudoLocalFs.generateFilePath("my.Psedo.File", fileSize);
  assertEquals("generateFilePath() failed.", fileSize,
      pfs.validateFileNameFormat(path));
  validateGetFileStatus(pfs, path, true);
  validateCreate(pfs, path, true);
  validateOpen(pfs, path, true);
  validateExists(pfs, path, true);
  // A relative name qualified against the pseudo FS is also valid.
  path = new Path("myPsedoFile.1237");
  path = path.makeQualified(pfs);
  validateGetFileStatus(pfs, path, true);
  validateCreate(pfs, path, true);
  validateOpen(pfs, path, true);
  validateExists(pfs, path, true);
}

APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Test if a file on PseudoLocalFs of a specific size can be opened and
     * read. Validate the size of the data read.
     * Test the read methods of {@link PseudoLocalFs.RandomInputStream}.
     * @throws Exception
     */
    @Test
    public void testPseudoLocalFsFileSize() throws Exception {
      long fileSize = 10000;
      Path path = PseudoLocalFs.generateFilePath("myPsedoFile", fileSize);
      PseudoLocalFs pfs = new PseudoLocalFs();
      pfs.create(path);
      // Byte-at-a-time read: count bytes until EOF.
      InputStream in = pfs.open(path, 0);
      long totalSize = 0;
      while (in.read() >= 0) {
        ++totalSize;
      }
      in.close();
      assertEquals("File size mismatch with read().", fileSize, totalSize);
      // Bulk read into a 1 KB buffer: accumulate the returned counts.
      in = pfs.open(path, 0);
      totalSize = 0;
      byte[] b = new byte[1024];
      int bytesRead = in.read(b);
      while (bytesRead >= 0) {
        totalSize += bytesRead;
        bytesRead = in.read(b);
      }
      assertEquals("File size mismatch with read(byte[]).", fileSize,
          totalSize);
    }

    Class: org.apache.hadoop.mapred.gridmix.TestRandomTextDataGenerator

    InternalCallVerifier EqualityVerifier 
    /**
     * Test if {@link RandomTextDataGenerator} can generate random words of
     * desired size.
     */
    @Test public void testRandomTextDataGenerator() {
      // 10 words, fixed seed 0, word size 5.
      RandomTextDataGenerator rtdg = new RandomTextDataGenerator(10, 0L, 5);
      // Restore generics stripped by extraction: the raw for-each over a raw
      // Set would not even compile with a String loop variable.
      List<String> words = rtdg.getRandomWords();
      assertEquals("List size mismatch", 10, words.size());
      // Deduplicate to prove all generated words are distinct.
      Set<String> wordsSet = new HashSet<String>(words);
      assertEquals("List size mismatch due to duplicates", 10, wordsSet.size());
      for (String word : wordsSet) {
        assertEquals("Word size mismatch", 5, word.length());
      }
    }

    Class: org.apache.hadoop.mapred.gridmix.TestResourceUsageEmulators

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): verifies ResourceUsageMatcherRunner first initializes the
    // configured TestResourceUsageEmulatorPlugin and later (after progress is
    // set and matcher.test() runs) invokes its emulation. The assertions
    // compare plugin-recorded timestamps against System.currentTimeMillis()
    // captured before each phase, so the statement order is load-bearing.
    // The MapContextImpl construction appears to have had its generic type
    // arguments stripped during extraction -- TODO confirm against the
    // original source. Kept byte-identical.
    /** * Test {@link LoadJob.ResourceUsageMatcherRunner}. */ @Test @SuppressWarnings("unchecked") public void testResourceUsageMatcherRunner() throws Exception { Configuration conf=new Configuration(); FakeProgressive progress=new FakeProgressive(); conf.setClass(TTConfig.TT_RESOURCE_CALCULATOR_PLUGIN,DummyResourceCalculatorPlugin.class,ResourceCalculatorPlugin.class); conf.setClass(ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS,TestResourceUsageEmulatorPlugin.class,ResourceUsageEmulatorPlugin.class); long currentTime=System.currentTimeMillis(); TaskAttemptID id=new TaskAttemptID("test",1,TaskType.MAP,1,1); StatusReporter reporter=new DummyReporter(progress); TaskInputOutputContext context=new MapContextImpl(conf,id,null,null,null,reporter,null); FakeResourceUsageMatcherRunner matcher=new FakeResourceUsageMatcherRunner(context,null); String identifier=TestResourceUsageEmulatorPlugin.DEFAULT_IDENTIFIER; long initTime=TestResourceUsageEmulatorPlugin.testInitialization(identifier,conf); assertTrue("ResourceUsageMatcherRunner failed to initialize the" + " configured plugin",initTime > currentTime); assertEquals("Progress mismatch in ResourceUsageMatcherRunner",0,progress.getProgress(),0D); progress.setProgress(0.01f); currentTime=System.currentTimeMillis(); matcher.test(); long emulateTime=TestResourceUsageEmulatorPlugin.testEmulation(identifier,conf); assertTrue("ProgressBasedResourceUsageMatcher failed to load and emulate" + " the configured plugin",emulateTime > currentTime); }

    InternalCallVerifier EqualityVerifier 
    /** * Test {@link CumulativeCpuUsageEmulatorPlugin}'s core CPU usage emulation * engine. */ @Test public void testCpuUsageEmulator() throws IOException { long target=100000L; int unitUsage=50; FakeCpuUsageEmulatorCore fakeCpuEmulator=new FakeCpuUsageEmulatorCore(); fakeCpuEmulator.setUnitUsage(unitUsage); FakeResourceUsageMonitor fakeMonitor=new FakeResourceUsageMonitor(fakeCpuEmulator); fakeCpuEmulator.calibrate(fakeMonitor,target); assertEquals("Fake calibration failed",100,fakeMonitor.getCumulativeCpuTime()); assertEquals("Fake calibration failed",100,fakeCpuEmulator.getCpuUsage()); assertEquals("Fake calibration failed",2,fakeCpuEmulator.getNumCalls()); }

    InternalCallVerifier EqualityVerifier 
    // NOTE(review): exercises CumulativeCpuUsageEmulatorPlugin in three
    // phases: (1) a disabled plugin (zero-valued metrics) must not invoke the
    // fake core and must report progress 1.0; (2) emulation accuracy at two
    // progress-interval settings; (3) boundary behavior -- emulation fires
    // only when progress crosses the configured interval (0.25 then 0.40).
    // The testEmulationBoundary calls depend on cumulative state carried in
    // fakeCore/monitor between calls, so the sequence is load-bearing and the
    // code is kept byte-identical.
    /** * Test {@link CumulativeCpuUsageEmulatorPlugin}. */ @Test public void testCumulativeCpuUsageEmulatorPlugin() throws Exception { Configuration conf=new Configuration(); long targetCpuUsage=1000L; int unitCpuUsage=50; FakeProgressive fakeProgress=new FakeProgressive(); FakeCpuUsageEmulatorCore fakeCore=new FakeCpuUsageEmulatorCore(); fakeCore.setUnitUsage(unitCpuUsage); CumulativeCpuUsageEmulatorPlugin cpuPlugin=new CumulativeCpuUsageEmulatorPlugin(fakeCore); ResourceUsageMetrics invalidUsage=createMetrics(0); cpuPlugin.initialize(conf,invalidUsage,null,null); int numCallsPre=fakeCore.getNumCalls(); long cpuUsagePre=fakeCore.getCpuUsage(); cpuPlugin.emulate(); int numCallsPost=fakeCore.getNumCalls(); long cpuUsagePost=fakeCore.getCpuUsage(); assertEquals("Disabled cumulative CPU usage emulation plugin works!",numCallsPre,numCallsPost); assertEquals("Disabled cumulative CPU usage emulation plugin works!",cpuUsagePre,cpuUsagePost); float progress=cpuPlugin.getProgress(); assertEquals("Invalid progress of disabled cumulative CPU usage emulation " + "plugin!",1.0f,progress,0f); ResourceUsageMetrics metrics=createMetrics(targetCpuUsage); ResourceCalculatorPlugin monitor=new FakeResourceUsageMonitor(fakeCore); testEmulationAccuracy(conf,fakeCore,monitor,metrics,cpuPlugin,targetCpuUsage,targetCpuUsage / unitCpuUsage); conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,0.2F); testEmulationAccuracy(conf,fakeCore,monitor,metrics,cpuPlugin,targetCpuUsage,targetCpuUsage / unitCpuUsage); fakeProgress=new FakeProgressive(); fakeCore.reset(); fakeCore.setUnitUsage(1); conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,0.25F); cpuPlugin.initialize(conf,metrics,monitor,fakeProgress); long initCpuUsage=monitor.getCumulativeCpuTime(); long initNumCalls=fakeCore.getNumCalls(); testEmulationBoundary(0F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 0 progress]"); 
testEmulationBoundary(0.24F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 24% progress]"); testEmulationBoundary(0.25F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[op, 25% progress]"); testEmulationBoundary(0.80F,fakeCore,fakeProgress,cpuPlugin,410,410,"[op, 80% progress]"); testEmulationBoundary(1F,fakeCore,fakeProgress,cpuPlugin,targetCpuUsage,targetCpuUsage,"[op, 100% progress]"); fakeProgress=new FakeProgressive(); fakeCore.reset(); fakeCore.setUnitUsage(unitCpuUsage); conf.setFloat(CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL,0.40F); cpuPlugin.initialize(conf,metrics,monitor,fakeProgress); initCpuUsage=monitor.getCumulativeCpuTime(); initNumCalls=fakeCore.getNumCalls(); testEmulationBoundary(0F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 0 progress]"); testEmulationBoundary(0.39F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[no-op, 39% progress]"); testEmulationBoundary(0.40F,fakeCore,fakeProgress,cpuPlugin,initCpuUsage,initNumCalls,"[op, 40% progress]"); testEmulationBoundary(0.90F,fakeCore,fakeProgress,cpuPlugin,700,700 / unitCpuUsage,"[op, 90% progress]"); testEmulationBoundary(1F,fakeCore,fakeProgress,cpuPlugin,targetCpuUsage,targetCpuUsage / unitCpuUsage,"[op, 100% progress]"); }

    Class: org.apache.hadoop.mapred.gridmix.TestSleepJob

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Run five generated sleep jobs with SLEEPJOB_MAPTASK_ONLY set and check
     * that every resulting gridmix job has zero reduce tasks.
     */
    @Test public void testMapTasksOnlySleepJobs() throws Exception {
      Configuration configuration = GridmixTestUtils.mrvl.getConfig();
      DebugJobProducer jobProducer = new DebugJobProducer(5, configuration);
      configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);
      UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      int seq = 1;
      for (JobStory story = jobProducer.getNextJob(); story != null;
          story = jobProducer.getNextJob()) {
        GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(
            configuration, 0, story, new Path("ignored"), ugi, seq++);
        gridmixJob.buildSplits(null);
        Job job = gridmixJob.call();
        // Map-only: no reducers may be configured.
        assertEquals(0, job.getNumReduceTasks());
      }
      jobProducer.close();
      // 5 jobs consumed, seq started at 1.
      assertEquals(6, seq);
    }

    Class: org.apache.hadoop.mapred.gridmix.TestUserResolve

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * SubmitterUserResolver should need no target-user list and should always
     * resolve to the current (submitting) user.
     */
    @Test public void testSubmitterResolver() throws Exception {
      final UserResolver resolver = new SubmitterUserResolver();
      assertFalse(resolver.needsTargetUsersList());
      UserGroupInformation current = UserGroupInformation.getCurrentUser();
      assertEquals(current, resolver.getTargetUgi((UserGroupInformation) null));
    }

    Class: org.apache.hadoop.mapred.jobcontrol.TestJobControl

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * getAssignedJobID() should be null before a mapreduce job is attached
     * and should reflect the mocked job's id afterwards.
     */
    @Test(timeout=30000) public void testGetAssignedJobId() throws Exception {
      JobConf conf = new JobConf();
      Job job = new Job(conf);
      // No mapreduce job assigned yet.
      assertNull(job.getAssignedJobID());
      org.apache.hadoop.mapreduce.Job mockjob =
          mock(org.apache.hadoop.mapreduce.Job.class);
      org.apache.hadoop.mapreduce.JobID jid =
          new org.apache.hadoop.mapreduce.JobID("test", 0);
      when(mockjob.getJobID()).thenReturn(jid);
      job.setJob(mockjob);
      JobID expected = new JobID("test", 0);
      assertEquals(expected, job.getAssignedJobID());
      verify(mockjob).getJobID();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A waiting job should accept a depending job.
     */
    @Test(timeout=30000) public void testAddingDependingJob() throws Exception {
      Job job_1 = getCopyJob();
      // Typed list; the original used a raw ArrayList (generics were lost).
      ArrayList<Job> dependingJobs = new ArrayList<Job>();
      JobControl jc = new JobControl("Test");
      jc.addJob(job_1);
      Assert.assertEquals(Job.WAITING, job_1.getState());
      Assert.assertTrue(
          job_1.addDependingJob(new Job(job_1.getJobConf(), dependingJobs)));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Deprecated state mutators must be no-ops: setState() and
     * setMapredJobID() should leave the observable state unchanged.
     */
    @SuppressWarnings("deprecation") @Test(timeout=30000) public void testJobState() throws Exception {
      Job job_1 = getCopyJob();
      JobControl jc = new JobControl("Test");
      jc.addJob(job_1);
      Assert.assertEquals(Job.WAITING, job_1.getState());
      // Deprecated setState() must not alter the state.
      job_1.setState(Job.SUCCESS);
      Assert.assertEquals(Job.WAITING, job_1.getState());
      org.apache.hadoop.mapreduce.Job mockjob =
          mock(org.apache.hadoop.mapreduce.Job.class);
      org.apache.hadoop.mapreduce.JobID jid =
          new org.apache.hadoop.mapreduce.JobID("test", 0);
      when(mockjob.getJobID()).thenReturn(jid);
      job_1.setJob(mockjob);
      Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
      // Deprecated setMapredJobID() is likewise a no-op.
      job_1.setMapredJobID("job_test_0001");
      Assert.assertEquals("job_test_0000", job_1.getMapredJobID());
      jc.stop();
    }

    Class: org.apache.hadoop.mapred.lib.TestChain

    InternalCallVerifier EqualityVerifier 
    /**
     * Chain.setReducer(..., false, ...) must record chain.reducer.byValue as
     * false in the reducer configuration.
     */
    @Test public void testSetReducerWithReducerByValueAsFalse() throws Exception {
      JobConf jobConf = new JobConf();
      JobConf reducerConf = new JobConf();
      Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
          Object.class, Object.class, false, reducerConf);
      // Default of true ensures the assertion proves the flag was written.
      boolean reduceByValue =
          reducerConf.getBoolean("chain.reducer.byValue", true);
      Assert.assertEquals("It should set chain.reducer.byValue as false "
          + "in reducerConf when we give value as false", false, reduceByValue);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Chain.setReducer(..., true, ...) must record chain.reducer.byValue as
     * true in the reducer configuration.
     */
    @Test public void testSetReducerWithReducerByValueAsTrue() throws Exception {
      JobConf jobConf = new JobConf();
      JobConf reducerConf = new JobConf();
      Chain.setReducer(jobConf, MyReducer.class, Object.class, Object.class,
          Object.class, Object.class, true, reducerConf);
      // Default of false ensures the assertion proves the flag was written.
      boolean reduceByValue =
          reducerConf.getBoolean("chain.reducer.byValue", false);
      Assert.assertEquals("It should set chain.reducer.byValue as true "
          + "in reducerConf when we give value as true", true, reduceByValue);
    }

    Class: org.apache.hadoop.mapred.lib.TestKeyFieldBasedPartitioner

    InternalCallVerifier EqualityVerifier 
    /**
     * Test that key-field-based partitioning works with an empty key: a key
     * with no fields must land in partition 0.
     */
    @Test public void testEmptyKey() throws Exception {
      KeyFieldBasedPartitioner kfbp = new KeyFieldBasedPartitioner();
      JobConf conf = new JobConf();
      conf.setInt("num.key.fields.for.partition", 10);
      kfbp.configure(conf);
      assertEquals("Empty key should map to 0th partition", 0,
          kfbp.getPartition(new Text(), new Text(), 10));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Configuring the partitioner a second time must not change the partition
     * computed for the same key.
     */
    @Test public void testMultiConfigure() {
      KeyFieldBasedPartitioner kfbp = new KeyFieldBasedPartitioner();
      JobConf conf = new JobConf();
      conf.set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k1,1");
      kfbp.setConf(conf);
      Text key = new Text("foo\tbar");
      Text value = new Text("val");
      int expectedPartition = kfbp.getPartition(key, value, 4096);
      // Re-configure and verify the result is stable.
      kfbp.configure(conf);
      assertEquals(expectedPartition, kfbp.getPartition(key, value, 4096));
    }

    Class: org.apache.hadoop.mapred.lib.db.TestDBInputFormat

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test configuration for db: DBConfiguration.* parameters should work.
     */
    @Test(timeout=5000) public void testSetInput() {
      JobConf configuration = new JobConf();
      String[] fieldNames = {"field1", "field2"};
      // Table-based overload stores class, table, fields, conditions, order.
      DBInputFormat.setInput(configuration, NullDBWritable.class, "table",
          "conditions", "orderBy", fieldNames);
      assertEquals(
          "org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",
          configuration.getClass(DBConfiguration.INPUT_CLASS_PROPERTY, null)
              .getName());
      assertEquals("table",
          configuration.get(DBConfiguration.INPUT_TABLE_NAME_PROPERTY, null));
      String[] fields =
          configuration.getStrings(DBConfiguration.INPUT_FIELD_NAMES_PROPERTY);
      assertEquals("field1", fields[0]);
      assertEquals("field2", fields[1]);
      assertEquals("conditions",
          configuration.get(DBConfiguration.INPUT_CONDITIONS_PROPERTY, null));
      assertEquals("orderBy",
          configuration.get(DBConfiguration.INPUT_ORDER_BY_PROPERTY, null));
      // Query-based overload stores the query strings instead.
      configuration = new JobConf();
      DBInputFormat.setInput(configuration, NullDBWritable.class, "query",
          "countQuery");
      assertEquals("query",
          configuration.get(DBConfiguration.INPUT_QUERY, null));
      assertEquals("countQuery",
          configuration.get(DBConfiguration.INPUT_COUNT_QUERY, null));
      // configureDB with credentials.
      JobConf jConfiguration = new JobConf();
      DBConfiguration.configureDB(jConfiguration, "driverClass", "dbUrl",
          "user", "password");
      assertEquals("driverClass",
          jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
      assertEquals("dbUrl", jConfiguration.get(DBConfiguration.URL_PROPERTY));
      assertEquals("user",
          jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
      assertEquals("password",
          jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
      // configureDB without credentials leaves user/password unset.
      jConfiguration = new JobConf();
      DBConfiguration.configureDB(jConfiguration, "driverClass", "dbUrl");
      assertEquals("driverClass",
          jConfiguration.get(DBConfiguration.DRIVER_CLASS_PROPERTY));
      assertEquals("dbUrl", jConfiguration.get(DBConfiguration.URL_PROPERTY));
      assertNull(jConfiguration.get(DBConfiguration.USERNAME_PROPERTY));
      assertNull(jConfiguration.get(DBConfiguration.PASSWORD_PROPERTY));
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): constructs an inner DBRecordReader against a test JDBC
    // driver and checks the freshly-created reader's key (0), value type
    // (NullDBWritable), position (0), and that next() yields no record.
    // The `new DBInputFormat().new DBRecordReader(...)` expression appears to
    // have had generic type arguments stripped during extraction -- TODO
    // confirm against the original source. Kept byte-identical.
    /** * test DBRecordReader. This reader should creates keys, values, know about position.. */ @SuppressWarnings("unchecked") @Test(timeout=5000) public void testDBRecordReader() throws Exception { JobConf job=mock(JobConf.class); DBConfiguration dbConfig=mock(DBConfiguration.class); String[] fields={"field1","filed2"}; @SuppressWarnings("rawtypes") DBRecordReader reader=new DBInputFormat().new DBRecordReader(new DBInputSplit(),NullDBWritable.class,job,DriverForTest.getConnection(),dbConfig,"condition",fields,"table"); LongWritable key=reader.createKey(); assertEquals(0,key.get()); DBWritable value=reader.createValue(); assertEquals("org.apache.hadoop.mapred.lib.db.DBInputFormat$NullDBWritable",value.getClass().getName()); assertEquals(0,reader.getPos()); assertFalse(reader.next(key,value)); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Test DBInputFormat class: the format should split the result into
     * chunks.
     * @throws Exception
     */
    @Test(timeout=10000) public void testDBInputFormat() throws Exception {
      JobConf configuration = new JobConf();
      setupDriver(configuration);
      DBInputFormat format = new DBInputFormat();
      // Configure twice to verify repeated configuration is harmless.
      format.setConf(configuration);
      format.setConf(configuration);
      DBInputFormat.DBInputSplit splitter =
          new DBInputFormat.DBInputSplit(1, 10);
      Reporter reporter = mock(Reporter.class);
      RecordReader reader =
          format.getRecordReader(splitter, configuration, reporter);
      configuration.setInt(MRJobConfig.NUM_MAPS, 3);
      InputSplit[] lSplits = format.getSplits(configuration, 3);
      assertEquals(5, lSplits[0].getLength());
      assertEquals(3, lSplits.length);
      // A fresh reader starts with a LongWritable key at position/progress 0.
      assertEquals(LongWritable.class, reader.createKey().getClass());
      assertEquals(0, reader.getPos());
      assertEquals(0, reader.getProgress(), 0.001);
      reader.close();
    }

    Class: org.apache.hadoop.mapred.pipes.TestPipeApplication

    BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): phase 1 runs Submitter.main with no args and asserts the
    // usage text (System.exit intercepted via ExitUtil); phase 2 runs a full
    // argument vector and expects exit status 0. The usage-string assertions
    // such as "[-input ] // Input directory" appear to have had bracketed
    // placeholders (e.g. <path>) stripped during extraction -- TODO confirm
    // against the original source before relying on them. Kept byte-identical.
    /** * test org.apache.hadoop.mapred.pipes.Submitter * @throws Exception */ @Test public void testSubmitter() throws Exception { JobConf conf=new JobConf(); File[] psw=cleanTokenPasswordFile(); System.setProperty("test.build.data","target/tmp/build/TEST_SUBMITTER_MAPPER/data"); conf.set("hadoop.log.dir","target/tmp"); Submitter.setIsJavaMapper(conf,false); Submitter.setIsJavaReducer(conf,false); Submitter.setKeepCommandFile(conf,false); Submitter.setIsJavaRecordReader(conf,false); Submitter.setIsJavaRecordWriter(conf,false); PipesPartitioner partitioner=new PipesPartitioner(); partitioner.configure(conf); Submitter.setJavaPartitioner(conf,partitioner.getClass()); assertEquals(PipesPartitioner.class,(Submitter.getJavaPartitioner(conf))); SecurityManager securityManager=System.getSecurityManager(); PrintStream oldps=System.out; ByteArrayOutputStream out=new ByteArrayOutputStream(); ExitUtil.disableSystemExit(); try { System.setOut(new PrintStream(out)); Submitter.main(new String[0]); fail(); } catch ( ExitUtil.ExitException e) { assertTrue(out.toString().contains("")); assertTrue(out.toString().contains("bin/hadoop pipes")); assertTrue(out.toString().contains("[-input ] // Input directory")); assertTrue(out.toString().contains("[-output ] // Output directory")); assertTrue(out.toString().contains("[-jar // jar filename")); assertTrue(out.toString().contains("[-inputformat ] // InputFormat class")); assertTrue(out.toString().contains("[-map ] // Java Map class")); assertTrue(out.toString().contains("[-partitioner ] // Java Partitioner")); assertTrue(out.toString().contains("[-reduce ] // Java Reduce class")); assertTrue(out.toString().contains("[-writer ] // Java RecordWriter")); assertTrue(out.toString().contains("[-program ] // executable URI")); assertTrue(out.toString().contains("[-reduces ] // number of reduces")); assertTrue(out.toString().contains("[-lazyOutput ] // createOutputLazily")); assertTrue(out.toString().contains("-conf specify an application 
configuration file")); assertTrue(out.toString().contains("-D use value for given property")); assertTrue(out.toString().contains("-fs specify a namenode")); assertTrue(out.toString().contains("-jt specify a job tracker")); assertTrue(out.toString().contains("-files specify comma separated files to be copied to the map reduce cluster")); assertTrue(out.toString().contains("-libjars specify comma separated jar files to include in the classpath.")); assertTrue(out.toString().contains("-archives specify comma separated archives to be unarchived on the compute machines.")); } finally { System.setOut(oldps); System.setSecurityManager(securityManager); if (psw != null) { for ( File file : psw) { file.deleteOnExit(); } } } } try { File fCommand=getFileCommand(null); String[] args=new String[22]; File input=new File(workSpace + File.separator + "input"); if (!input.exists()) { Assert.assertTrue(input.createNewFile()); } File outPut=new File(workSpace + File.separator + "output"); FileUtil.fullyDelete(outPut); args[0]="-input"; args[1]=input.getAbsolutePath(); args[2]="-output"; args[3]=outPut.getAbsolutePath(); args[4]="-inputformat"; args[5]="org.apache.hadoop.mapred.TextInputFormat"; args[6]="-map"; args[7]="org.apache.hadoop.mapred.lib.IdentityMapper"; args[8]="-partitioner"; args[9]="org.apache.hadoop.mapred.pipes.PipesPartitioner"; args[10]="-reduce"; args[11]="org.apache.hadoop.mapred.lib.IdentityReducer"; args[12]="-writer"; args[13]="org.apache.hadoop.mapred.TextOutputFormat"; args[14]="-program"; args[15]=fCommand.getAbsolutePath(); args[16]="-reduces"; args[17]="2"; args[18]="-lazyOutput"; args[19]="lazyOutput"; args[20]="-jobconf"; args[21]="mapreduce.pipes.isjavarecordwriter=false,mapreduce.pipes.isjavarecordreader=false"; Submitter.main(args); fail(); } catch ( ExitUtil.ExitException e) { assertEquals(e.status,0); } finally { System.setOut(oldps); System.setSecurityManager(securityManager); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Test PipesPartitioner: set and get data from PipesPartitioner.
     */
    @Test public void testPipesPartitioner() {
      PipesPartitioner partitioner = new PipesPartitioner();
      JobConf configuration = new JobConf();
      Submitter.getJavaPartitioner(configuration);
      partitioner.configure(new JobConf());
      IntWritable iw = new IntWritable(4);
      // Without an explicit next partition the partitioner returns 0.
      assertEquals(0, partitioner.getPartition(iw, new Text("test"), 2));
      // After setNextPartition(3) the same call must yield 3.
      PipesPartitioner.setNextPartition(3);
      assertEquals(3, partitioner.getPartition(iw, new Text("test"), 2));
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): drives a pipes Application end-to-end against a stub
    // child process: flush/mapItem via the downlink, then checks the child's
    // stdout, the reporter's progress/counter/status, the collected output
    // entry (123 -> "value"), and that abort() surfaces "pipe child
    // exception". The declaration `Application,Writable,IntWritable,Text>` is
    // syntactically broken -- generic type arguments were evidently stripped
    // during extraction (likewise the Token and Writer constructions) -- TODO
    // restore from the original source. Kept byte-identical.
    /** * test org.apache.hadoop.mapred.pipes.Application * test a internal functions: MessageType.REGISTER_COUNTER, INCREMENT_COUNTER, STATUS, PROGRESS... * @throws Throwable */ @Test public void testApplication() throws Throwable { JobConf conf=new JobConf(); RecordReader rReader=new Reader(); File fCommand=getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationStub"); TestTaskReporter reporter=new TestTaskReporter(); File[] psw=cleanTokenPasswordFile(); try { conf.set(MRJobConfig.TASK_ATTEMPT_ID,taskName); conf.set(MRJobConfig.CACHE_LOCALFILES,fCommand.getAbsolutePath()); Token token=new Token("user".getBytes(),"password".getBytes(),new Text("kind"),new Text("service")); TokenCache.setJobToken(token,conf.getCredentials()); FakeCollector output=new FakeCollector(new Counters.Counter(),new Progress()); FileSystem fs=new RawLocalFileSystem(); fs.setConf(conf); Writer wr=new Writer(conf,fs.create(new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),IntWritable.class,Text.class,null,null,true); output.setWriter(wr); conf.set(Submitter.PRESERVE_COMMANDFILE,"true"); initStdOut(conf); Application,Writable,IntWritable,Text> application=new Application,Writable,IntWritable,Text>(conf,rReader,output,reporter,IntWritable.class,Text.class); application.getDownlink().flush(); application.getDownlink().mapItem(new IntWritable(3),new Text("txt")); application.getDownlink().flush(); application.waitForFinish(); wr.close(); String stdOut=readStdOut(conf); assertTrue(stdOut.contains("key:3")); assertTrue(stdOut.contains("value:txt")); assertEquals(1.0,reporter.getProgress(),0.01); assertNotNull(reporter.getCounter("group","name")); assertEquals(reporter.getStatus(),"PROGRESS"); stdOut=readFile(new File(workSpace.getAbsolutePath() + File.separator + "outfile")); assertEquals(0.55f,rReader.getProgress(),0.001); application.getDownlink().close(); Entry entry=output.getCollect().entrySet().iterator().next(); assertEquals(123,entry.getKey().get()); 
assertEquals("value",entry.getValue().toString()); try { application.abort(new Throwable()); fail(); } catch ( IOException e) { assertEquals("pipe child exception",e.getMessage()); } } finally { if (psw != null) { for ( File file : psw) { file.deleteOnExit(); } } } }

    Class: org.apache.hadoop.mapred.pipes.TestPipesNonJavaInputFormat

    BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test PipesNonJavaInputFormat.
     */
    @Test public void testFormat() throws IOException {
      PipesNonJavaInputFormat inputFormat = new PipesNonJavaInputFormat();
      JobConf conf = new JobConf();
      Reporter reporter = mock(Reporter.class);
      RecordReader reader =
          inputFormat.getRecordReader(new FakeSplit(), conf, reporter);
      assertEquals(0.0f, reader.getProgress(), 0.001);
      // Create two empty input files so getSplits() yields two splits.
      File input1 = new File(workSpace + File.separator + "input1");
      if (!input1.getParentFile().exists()) {
        Assert.assertTrue(input1.getParentFile().mkdirs());
      }
      if (!input1.exists()) {
        Assert.assertTrue(input1.createNewFile());
      }
      File input2 = new File(workSpace + File.separator + "input2");
      if (!input2.exists()) {
        Assert.assertTrue(input2.createNewFile());
      }
      conf.set(
          org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
          StringUtils.escapeString(input1.getAbsolutePath()) + ","
              + StringUtils.escapeString(input2.getAbsolutePath()));
      InputSplit[] splits = inputFormat.getSplits(conf, 2);
      assertEquals(2, splits.length);
      PipesNonJavaInputFormat.PipesDummyRecordReader dummyRecordReader =
          new PipesNonJavaInputFormat.PipesDummyRecordReader(conf, splits[0]);
      // The dummy reader produces no keys/values; its "progress" echoes the
      // float key it is fed through next().
      assertNull(dummyRecordReader.createKey());
      assertNull(dummyRecordReader.createValue());
      assertEquals(0, dummyRecordReader.getPos());
      assertEquals(0.0, dummyRecordReader.getProgress(), 0.001);
      assertTrue(
          dummyRecordReader.next(new FloatWritable(2.0f), NullWritable.get()));
      assertEquals(2.0, dummyRecordReader.getProgress(), 0.001);
      dummyRecordReader.close();
    }

    Class: org.apache.hadoop.mapreduce.TestCounters

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verify counter value works: setValue() initializes and overwrites,
     * increment() accumulates.
     */
    @Test public void testCounterValue() {
      final int NUMBER_TESTS = 100;
      final int NUMBER_INC = 10;
      final Random rand = new Random();
      for (int trial = 0; trial < NUMBER_TESTS; trial++) {
        long initValue = rand.nextInt();
        long expectedValue = initValue;
        Counter counter = new Counters().findCounter("test", "foo");
        counter.setValue(initValue);
        assertEquals("Counter value is not initialized correctly",
            expectedValue, counter.getValue());
        // Apply a series of random increments, tracking the expected total.
        for (int inc = 0; inc < NUMBER_INC; inc++) {
          int incValue = rand.nextInt();
          counter.increment(incValue);
          expectedValue += incValue;
          assertEquals("Counter value is not incremented correctly",
              expectedValue, counter.getValue());
        }
        // A fresh setValue() must overwrite the accumulated total.
        expectedValue = rand.nextInt();
        counter.setValue(expectedValue);
        assertEquals("Counter value is not set correctly", expectedValue,
            counter.getValue());
      }
    }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * incrAllCounters() must carry both framework and generic counter values
     * into the destination Counters instance.
     */
    @Test public void testCountersIncrement() {
      Counters fCounters = new Counters();
      Counter fCounter = fCounters.findCounter(FRAMEWORK_COUNTER);
      fCounter.setValue(100);
      Counter gCounter = fCounters.findCounter("test", "foo");
      gCounter.setValue(200);
      Counters counters = new Counters();
      counters.incrAllCounters(fCounters);
      for (CounterGroup cg : fCounters) {
        CounterGroup group = counters.getGroup(cg.getName());
        Counter counter;
        if (group.getName().equals("test")) {
          counter = counters.findCounter("test", "foo");
          assertEquals(200, counter.getValue());
        } else {
          counter = counters.findCounter(FRAMEWORK_COUNTER);
          assertEquals(100, counter.getValue());
        }
      }
    }

    Class: org.apache.hadoop.mapreduce.TestLargeSort

    InternalCallVerifier EqualityVerifier 
    /**
     * Run LargeSorter across several io.sort.mb settings (including one over
     * 1GB of sort buffer) and require a zero exit code each time.
     */
    @Test public void testLargeSort() throws Exception {
      String[] args = new String[0];
      for (int ioSortMb : new int[] {128, 256, 1536}) {
        Configuration conf = new Configuration(cluster.getConfig());
        conf.setInt(MRJobConfig.IO_SORT_MB, ioSortMb);
        conf.setInt(LargeSorter.NUM_MAP_TASKS, 1);
        conf.setInt(LargeSorter.MBS_PER_MAP, ioSortMb);
        assertEquals("Large sort failed for " + ioSortMb, 0,
            ToolRunner.run(conf, new LargeSorter(), args));
      }
    }

    Class: org.apache.hadoop.mapreduce.TestNewCombinerGrouping

    APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end check that the combiner runs with a custom key-grouping
     * comparator: the combiner must consume more records than it emits, and
     * the reducer output must be exactly {A2, B5}.
     */
    @Test public void testCombiner() throws Exception {
      if (!new File(TEST_ROOT_DIR).mkdirs()) {
        throw new RuntimeException("Could not create test dir: " + TEST_ROOT_DIR);
      }
      File in = new File(TEST_ROOT_DIR, "input");
      if (!in.mkdirs()) {
        throw new RuntimeException("Could not create test dir: " + in);
      }
      File out = new File(TEST_ROOT_DIR, "output");
      // Write the test records; close the writer even if a write fails
      // (the original left it open on failure).
      PrintWriter pw = new PrintWriter(new FileWriter(new File(in, "data.txt")));
      try {
        pw.println("A|a,1");
        pw.println("A|b,2");
        pw.println("B|a,3");
        pw.println("B|b,4");
        pw.println("B|c,5");
      } finally {
        pw.close();
      }
      JobConf conf = new JobConf();
      conf.set("mapreduce.framework.name", "local");
      Job job = new Job(conf);
      TextInputFormat.setInputPaths(job, new Path(in.getPath()));
      TextOutputFormat.setOutputPath(job, new Path(out.getPath()));
      job.setMapperClass(Map.class);
      job.setReducerClass(Reduce.class);
      job.setInputFormatClass(TextInputFormat.class);
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(LongWritable.class);
      job.setOutputFormatClass(TextOutputFormat.class);
      job.setGroupingComparatorClass(GroupComparator.class);
      job.setCombinerKeyGroupingComparatorClass(GroupComparator.class);
      job.setCombinerClass(Combiner.class);
      // Force the combiner to run even with few spills.
      job.getConfiguration().setInt("min.num.spills.for.combine", 0);
      job.submit();
      job.waitForCompletion(false);
      if (!job.isSuccessful()) {
        Assert.fail("Job failed");
      }
      Counters counters = job.getCounters();
      long combinerInputRecords = counters.findCounter(
          "org.apache.hadoop.mapreduce.TaskCounter",
          "COMBINE_INPUT_RECORDS").getValue();
      long combinerOutputRecords = counters.findCounter(
          "org.apache.hadoop.mapreduce.TaskCounter",
          "COMBINE_OUTPUT_RECORDS").getValue();
      // The combiner must have run and reduced the record count.
      Assert.assertTrue(combinerInputRecords > 0);
      Assert.assertTrue(combinerInputRecords > combinerOutputRecords);
      // Typed set (the original used a raw HashSet) and a reader closed even
      // if an assertion fails.
      Set<String> output = new HashSet<String>();
      BufferedReader br = new BufferedReader(
          new FileReader(new File(out, "part-r-00000")));
      try {
        String line = br.readLine();
        Assert.assertNotNull(line);
        output.add(line.substring(0, 1) + line.substring(4, 5));
        line = br.readLine();
        Assert.assertNotNull(line);
        output.add(line.substring(0, 1) + line.substring(4, 5));
        line = br.readLine();
        Assert.assertNull(line);
      } finally {
        br.close();
      }
      Set<String> expected = new HashSet<String>();
      expected.add("A2");
      expected.add("B5");
      Assert.assertEquals(expected, output);
    }

    Class: org.apache.hadoop.mapreduce.TestTaskContext

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier IgnoredMethod HybridVerifier 
    /**
     * Tests context.setStatus method.
     * TODO fix testcase
     * @throws IOException
     * @throws InterruptedException
     * @throws ClassNotFoundException
     */
    @Test @Ignore public void testContextStatus() throws IOException, InterruptedException, ClassNotFoundException {
      Path test = new Path(testRootTempDir, "testContextStatus");
      // Map-only job whose mapper sets a custom status string.
      int numMaps = 1;
      Job job = MapReduceTestUtil.createJob(createJobConf(),
          new Path(test, "in"), new Path(test, "out"), numMaps, 0);
      job.setMapperClass(MyMapper.class);
      job.waitForCompletion(true);
      assertTrue("Job failed", job.isSuccessful());
      TaskReport[] reports = job.getTaskReports(TaskType.MAP);
      assertEquals(numMaps, reports.length);
      assertEquals(myStatus, reports[0].getState());
      // Map+reduce job copying the data through unchanged.
      int numReduces = 1;
      job = MapReduceTestUtil.createJob(createJobConf(), new Path(test, "in"),
          new Path(test, "out"), numMaps, numReduces);
      job.setMapperClass(DataCopyMapper.class);
      job.setReducerClass(DataCopyReducer.class);
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(Text.class);
      job.setMaxMapAttempts(1);
      job.setMaxReduceAttempts(0);
      job.waitForCompletion(true);
      assertTrue("Job failed", job.isSuccessful());
    }

    Class: org.apache.hadoop.mapreduce.TestTypeConverter

    EqualityVerifier 
    /**
     * Every value of each YARN enum must be convertible without throwing;
     * NEW_SAVING specifically maps to the PREP job state.
     */
    @Test public void testEnums() throws Exception {
      for (YarnApplicationState applicationState : YarnApplicationState.values()) {
        TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
      }
      Assert.assertEquals(State.PREP,
          TypeConverter.fromYarn(YarnApplicationState.NEW_SAVING,
              FinalApplicationStatus.FAILED));
      for (TaskType taskType : TaskType.values()) {
        TypeConverter.fromYarn(taskType);
      }
      for (JobState jobState : JobState.values()) {
        TypeConverter.fromYarn(jobState);
      }
      for (QueueState queueState : QueueState.values()) {
        TypeConverter.fromYarn(queueState);
      }
      for (TaskState taskState : TaskState.values()) {
        TypeConverter.fromYarn(taskState);
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Test that child queues are converted too during conversion of the parent
     * queue.
     */
    @Test public void testFromYarnQueue() {
      org.apache.hadoop.yarn.api.records.QueueInfo child =
          Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
      Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);
      org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
          Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
      // Typed list; the original used a raw ArrayList (generics were lost).
      List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
          new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
      children.add(child);
      Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
      Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);
      org.apache.hadoop.mapreduce.QueueInfo returned =
          TypeConverter.fromYarn(queueInfo, new Configuration());
      // JUnit convention: expected value first (the original had it second).
      Assert.assertEquals("QueueInfo children weren't properly converted",
          1, returned.getQueueChildren().size());
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * fromYarn(ApplicationReport) must not NPE when the usage report is
     * missing, and must map every field once the usage report is supplied.
     */
    @Test public void testFromYarnApplicationReport() {
      ApplicationId mockAppId = mock(ApplicationId.class);
      when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
      when(mockAppId.getId()).thenReturn(6789);
      ApplicationReport mockReport = mock(ApplicationReport.class);
      when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
      when(mockReport.getApplicationId()).thenReturn(mockAppId);
      when(mockReport.getYarnApplicationState())
          .thenReturn(YarnApplicationState.KILLED);
      when(mockReport.getUser()).thenReturn("dummy-user");
      when(mockReport.getQueue()).thenReturn("dummy-queue");
      String jobFile = "dummy-path/job.xml";
      // A report without an ApplicationResourceUsageReport must not NPE.
      try {
        JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
      } catch (NullPointerException npe) {
        Assert.fail("Type converstion from YARN fails for jobs without "
            + "ApplicationUsageReport");
      }
      ApplicationResourceUsageReport appUsageRpt =
          Records.newRecord(ApplicationResourceUsageReport.class);
      Resource r = Records.newRecord(Resource.class);
      r.setMemory(2048);
      appUsageRpt.setNeededResources(r);
      appUsageRpt.setNumReservedContainers(1);
      appUsageRpt.setNumUsedContainers(3);
      appUsageRpt.setReservedResources(r);
      appUsageRpt.setUsedResources(r);
      when(mockReport.getApplicationResourceUsageReport())
          .thenReturn(appUsageRpt);
      JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
      Assert.assertNotNull("fromYarn returned null status", status);
      Assert.assertEquals("jobFile set incorrectly", "dummy-path/job.xml",
          status.getJobFile());
      Assert.assertEquals("queue set incorrectly", "dummy-queue",
          status.getQueue());
      Assert.assertEquals("trackingUrl set incorrectly", "dummy-tracking-url",
          status.getTrackingUrl());
      Assert.assertEquals("user set incorrectly", "dummy-user",
          status.getUsername());
      Assert.assertEquals("schedulingInfo set incorrectly",
          "dummy-tracking-url", status.getSchedulingInfo());
      Assert.assertEquals("jobId set incorrectly", 6789,
          status.getJobID().getId());
      Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED,
          status.getState());
      Assert.assertEquals("needed mem info set incorrectly", 2048,
          status.getNeededMem());
      Assert.assertEquals("num rsvd slots info set incorrectly", 1,
          status.getNumReservedSlots());
      Assert.assertEquals("num used slots info set incorrectly", 3,
          status.getNumUsedSlots());
      Assert.assertEquals("rsvd mem info set incorrectly", 2048,
          status.getReservedMem());
      Assert.assertEquals("used mem info set incorrectly", 2048,
          status.getUsedMem());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testFromYarnQueueInfo(){ org.apache.hadoop.yarn.api.records.QueueInfo queueInfo=Records.newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class); queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED); org.apache.hadoop.mapreduce.QueueInfo returned=TypeConverter.fromYarn(queueInfo,new Configuration()); Assert.assertEquals("queueInfo translation didn't work.",returned.getState().toString(),queueInfo.getQueueState().toString().toLowerCase()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testFromYarn() throws Exception { int appStartTime=612354; int appFinishTime=612355; YarnApplicationState state=YarnApplicationState.RUNNING; ApplicationId applicationId=ApplicationId.newInstance(0,0); ApplicationReport applicationReport=Records.newRecord(ApplicationReport.class); applicationReport.setApplicationId(applicationId); applicationReport.setYarnApplicationState(state); applicationReport.setStartTime(appStartTime); applicationReport.setFinishTime(appFinishTime); applicationReport.setUser("TestTypeConverter-user"); ApplicationResourceUsageReport appUsageRpt=Records.newRecord(ApplicationResourceUsageReport.class); Resource r=Records.newRecord(Resource.class); r.setMemory(2048); appUsageRpt.setNeededResources(r); appUsageRpt.setNumReservedContainers(1); appUsageRpt.setNumUsedContainers(3); appUsageRpt.setReservedResources(r); appUsageRpt.setUsedResources(r); applicationReport.setApplicationResourceUsageReport(appUsageRpt); JobStatus jobStatus=TypeConverter.fromYarn(applicationReport,"dummy-jobfile"); Assert.assertEquals(appStartTime,jobStatus.getStartTime()); Assert.assertEquals(appFinishTime,jobStatus.getFinishTime()); Assert.assertEquals(state.toString(),jobStatus.getState().toString()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testFromYarnJobReport() throws Exception { int jobStartTime=612354; int jobFinishTime=612355; JobState state=JobState.RUNNING; JobId jobId=Records.newRecord(JobId.class); JobReport jobReport=Records.newRecord(JobReport.class); ApplicationId applicationId=ApplicationId.newInstance(0,0); jobId.setAppId(applicationId); jobId.setId(0); jobReport.setJobId(jobId); jobReport.setJobState(state); jobReport.setStartTime(jobStartTime); jobReport.setFinishTime(jobFinishTime); jobReport.setUser("TestTypeConverter-user"); JobStatus jobStatus=TypeConverter.fromYarn(jobReport,"dummy-jobfile"); Assert.assertEquals(jobStartTime,jobStatus.getStartTime()); Assert.assertEquals(jobFinishTime,jobStatus.getFinishTime()); Assert.assertEquals(state.toString(),jobStatus.getState().toString()); }

    Class: org.apache.hadoop.mapreduce.filecache.TestClientDistributedCacheManager

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testDetermineTimestamps() throws IOException { Job job=Job.getInstance(conf); job.addCacheFile(firstCacheFile.toUri()); job.addCacheFile(secondCacheFile.toUri()); Configuration jobConf=job.getConfiguration(); Map statCache=new HashMap(); ClientDistributedCacheManager.determineTimestamps(jobConf,statCache); FileStatus firstStatus=statCache.get(firstCacheFile.toUri()); FileStatus secondStatus=statCache.get(secondCacheFile.toUri()); Assert.assertNotNull(firstStatus); Assert.assertNotNull(secondStatus); Assert.assertEquals(2,statCache.size()); String expected=firstStatus.getModificationTime() + "," + secondStatus.getModificationTime(); Assert.assertEquals(expected,jobConf.get(MRJobConfig.CACHE_FILE_TIMESTAMPS)); }

    Class: org.apache.hadoop.mapreduce.jobhistory.TestEvents

    InternalCallVerifier EqualityVerifier 
    /** * simple test JobPriorityChangeEvent and JobPriorityChange * @throws Exception */ @Test(timeout=10000) public void testJobPriorityChange() throws Exception { org.apache.hadoop.mapreduce.JobID jid=new JobID("001",1); JobPriorityChangeEvent test=new JobPriorityChangeEvent(jid,JobPriority.LOW); assertEquals(test.getJobId().toString(),jid.toString()); assertEquals(test.getPriority(),JobPriority.LOW); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=10000) public void testJobQueueChange() throws Exception { org.apache.hadoop.mapreduce.JobID jid=new JobID("001",1); JobQueueChangeEvent test=new JobQueueChangeEvent(jid,"newqueue"); assertEquals(test.getJobId().toString(),jid.toString()); assertEquals(test.getJobQueueName(),"newqueue"); }

    InternalCallVerifier EqualityVerifier 
    /** * simple test TaskUpdatedEvent and TaskUpdated * @throws Exception */ @Test(timeout=10000) public void testTaskUpdated() throws Exception { JobID jid=new JobID("001",1); TaskID tid=new TaskID(jid,TaskType.REDUCE,2); TaskUpdatedEvent test=new TaskUpdatedEvent(tid,1234L); assertEquals(test.getTaskId().toString(),tid.toString()); assertEquals(test.getFinishTime(),1234L); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Replays the serialized stream from getEvents() and verifies that each
     * event deserializes with the expected type and id, in order.
     */
    @Test(timeout = 10000)
    public void testEvents() throws Exception {
      EventReader reader =
          new EventReader(new DataInputStream(new ByteArrayInputStream(getEvents())));

      // 1: job priority changed
      HistoryEvent e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.JOB_PRIORITY_CHANGED));
      assertEquals("ID", ((JobPriorityChange) e.getDatum()).jobid.toString());

      // 2: job status changed
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.JOB_STATUS_CHANGED));
      assertEquals("ID", ((JobStatusChanged) e.getDatum()).jobid.toString());

      // 3: task updated
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.TASK_UPDATED));
      assertEquals("ID", ((TaskUpdated) e.getDatum()).taskid.toString());

      // 4: reduce attempt killed
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());

      // 5: job killed
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.JOB_KILLED));
      assertEquals("ID", ((JobUnsuccessfulCompletion) e.getDatum()).jobid.toString());

      // 6: reduce attempt started
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptStarted) e.getDatum()).taskid.toString());

      // 7: reduce attempt finished
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptFinished) e.getDatum()).taskid.toString());

      // 8-9: two reduce attempts killed
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());

      // 10: reduce attempt started
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_STARTED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptStarted) e.getDatum()).taskid.toString());

      // 11: reduce attempt finished
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_FINISHED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptFinished) e.getDatum()).taskid.toString());

      // 12-13: two more reduce attempts killed
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());
      e = reader.getNextEvent();
      assertTrue(e.getEventType().equals(EventType.REDUCE_ATTEMPT_KILLED));
      assertEquals("task_1_2_r03_4",
          ((TaskAttemptUnsuccessfulCompletion) e.getDatum()).taskid.toString());

      reader.close();
    }

    InternalCallVerifier EqualityVerifier 
    /** * test a getters of TaskAttemptFinishedEvent and TaskAttemptFinished * @throws Exception */ @Test(timeout=10000) public void testTaskAttemptFinishedEvent() throws Exception { JobID jid=new JobID("001",1); TaskID tid=new TaskID(jid,TaskType.REDUCE,2); TaskAttemptID taskAttemptId=new TaskAttemptID(tid,3); Counters counters=new Counters(); TaskAttemptFinishedEvent test=new TaskAttemptFinishedEvent(taskAttemptId,TaskType.REDUCE,"TEST",123L,"RAKNAME","HOSTNAME","STATUS",counters); assertEquals(test.getAttemptId().toString(),taskAttemptId.toString()); assertEquals(test.getCounters(),counters); assertEquals(test.getFinishTime(),123L); assertEquals(test.getHostname(),"HOSTNAME"); assertEquals(test.getRackName(),"RAKNAME"); assertEquals(test.getState(),"STATUS"); assertEquals(test.getTaskId(),tid); assertEquals(test.getTaskStatus(),"TEST"); assertEquals(test.getTaskType(),TaskType.REDUCE); }

    Class: org.apache.hadoop.mapreduce.jobhistory.TestJobHistoryEventHandler

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testGetHistoryIntermediateDoneDirForUser() throws IOException { Configuration conf=new Configuration(); conf.set(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR,"/mapred/history/done_intermediate"); conf.set(MRJobConfig.USER_NAME,System.getProperty("user.name")); String pathStr=JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf); Assert.assertEquals("/mapred/history/done_intermediate/" + System.getProperty("user.name"),pathStr); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,dfsCluster.getURI().toString()); FileOutputStream os=new FileOutputStream(coreSitePath); conf.writeXml(os); os.close(); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,"file:///"); pathStr=JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf); Assert.assertEquals(dfsCluster.getURI().toString() + "/mapred/history/done_intermediate/" + System.getProperty("user.name"),pathStr); }

    Class: org.apache.hadoop.mapreduce.lib.db.TestDbClasses

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=1000) public void testDataDrivenDBInputFormat() throws Exception { JobContext jobContext=mock(JobContext.class); Configuration configuration=new Configuration(); configuration.setInt(MRJobConfig.NUM_MAPS,1); when(jobContext.getConfiguration()).thenReturn(configuration); DataDrivenDBInputFormat format=new DataDrivenDBInputFormat(); List splits=format.getSplits(jobContext); assertEquals(1,splits.size()); DataDrivenDBInputSplit split=(DataDrivenDBInputSplit)splits.get(0); assertEquals("1=1",split.getLowerClause()); assertEquals("1=1",split.getUpperClause()); configuration.setInt(MRJobConfig.NUM_MAPS,2); DataDrivenDBInputFormat.setBoundingQuery(configuration,"query"); assertEquals("query",configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY)); Job job=mock(Job.class); when(job.getConfiguration()).thenReturn(configuration); DataDrivenDBInputFormat.setInput(job,NullDBWritable.class,"query","Bounding Query"); assertEquals("Bounding Query",configuration.get(DBConfiguration.INPUT_BOUNDING_QUERY)); }

    InternalCallVerifier EqualityVerifier 
    /** * test generate sql script for OracleDBRecordReader. */ @Test(timeout=2000) public void testOracleDBRecordReader() throws Exception { DBInputSplit splitter=new DBInputSplit(1,10); Configuration configuration=new Configuration(); Connection connect=DriverForTest.getConnection(); DBConfiguration dbConfiguration=new DBConfiguration(configuration); dbConfiguration.setInputOrderBy("Order"); String[] fields={"f1","f2"}; OracleDBRecordReader recorder=new OracleDBRecordReader(splitter,NullDBWritable.class,configuration,connect,dbConfiguration,"condition",fields,"table"); assertEquals("SELECT * FROM (SELECT a.*,ROWNUM dbif_rno FROM ( SELECT f1, f2 FROM table WHERE condition ORDER BY Order ) a WHERE rownum <= 10 ) WHERE dbif_rno > 1",recorder.getSelectQuery()); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=1000) public void testOracleDataDrivenDBInputFormat() throws Exception { OracleDataDrivenDBInputFormat format=new OracleDataDrivenDBInputFormatForTest(); testCommonSplitterTypes(format); assertEquals(OracleDateSplitter.class,format.getSplitter(Types.TIMESTAMP).getClass()); assertEquals(OracleDateSplitter.class,format.getSplitter(Types.DATE).getClass()); assertEquals(OracleDateSplitter.class,format.getSplitter(Types.TIME).getClass()); }

    InternalCallVerifier EqualityVerifier 
    /** * test splitters from DataDrivenDBInputFormat. For different data types may * be different splitter */ @Test(timeout=1000) public void testDataDrivenDBInputFormatSplitter(){ DataDrivenDBInputFormat format=new DataDrivenDBInputFormat(); testCommonSplitterTypes(format); assertEquals(DateSplitter.class,format.getSplitter(Types.TIMESTAMP).getClass()); assertEquals(DateSplitter.class,format.getSplitter(Types.DATE).getClass()); assertEquals(DateSplitter.class,format.getSplitter(Types.TIME).getClass()); }

    Class: org.apache.hadoop.mapreduce.lib.db.TestSplitters

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=2000) public void testBooleanSplitter() throws Exception { BooleanSplitter splitter=new BooleanSplitter(); ResultSet result=mock(ResultSet.class); when(result.getString(1)).thenReturn("result1"); List splits=splitter.split(configuration,result,"column"); assertSplits(new String[]{"column = FALSE column = FALSE","column IS NULL column IS NULL"},splits); when(result.getString(1)).thenReturn("result1"); when(result.getString(2)).thenReturn("result2"); when(result.getBoolean(1)).thenReturn(true); when(result.getBoolean(2)).thenReturn(false); splits=splitter.split(configuration,result,"column"); assertEquals(0,splits.size()); when(result.getString(1)).thenReturn("result1"); when(result.getString(2)).thenReturn("result2"); when(result.getBoolean(1)).thenReturn(false); when(result.getBoolean(2)).thenReturn(true); splits=splitter.split(configuration,result,"column"); assertSplits(new String[]{"column = FALSE column = FALSE",".*column = TRUE"},splits); }

    Class: org.apache.hadoop.mapreduce.lib.input.TestCombineFileInputFormat

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * Test that directories do not get included as part of getSplits() */ @Test public void testGetSplitsWithDirectory() throws Exception { MiniDFSCluster dfs=null; try { Configuration conf=new Configuration(); dfs=new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1).build(); dfs.waitActive(); dfs=new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1).build(); dfs.waitActive(); FileSystem fileSys=dfs.getFileSystem(); Path dir1=new Path("/dir1"); Path file=new Path("/dir1/file1"); Path dir2=new Path("/dir1/dir2"); if (!fileSys.mkdirs(dir1)) { throw new IOException("Mkdirs failed to create " + dir1.toString()); } FSDataOutputStream out=fileSys.create(file); out.write(new byte[0]); out.close(); if (!fileSys.mkdirs(dir2)) { throw new IOException("Mkdirs failed to create " + dir2.toString()); } DummyInputFormat inFormat=new DummyInputFormat(); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,"/dir1"); List splits=inFormat.getSplits(job); assertEquals(1,splits.size()); CombineFileSplit fileSplit=(CombineFileSplit)splits.get(0); assertEquals(1,fileSplit.getNumPaths()); assertEquals(file.getName(),fileSplit.getPath(0).getName()); assertEquals(0,fileSplit.getOffset(0)); assertEquals(0,fileSplit.getLength(0)); } finally { if (dfs != null) { dfs.shutdown(); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * Test when the input file's length is 0. */ @Test public void testForEmptyFile() throws Exception { Configuration conf=new Configuration(); FileSystem fileSys=FileSystem.get(conf); Path file=new Path("test" + "/file"); FSDataOutputStream out=fileSys.create(file,true,conf.getInt("io.file.buffer.size",4096),(short)1,(long)BLOCKSIZE); out.write(new byte[0]); out.close(); DummyInputFormat inFormat=new DummyInputFormat(); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,"test"); List splits=inFormat.getSplits(job); assertEquals(1,splits.size()); CombineFileSplit fileSplit=(CombineFileSplit)splits.get(0); assertEquals(1,fileSplit.getNumPaths()); assertEquals(file.getName(),fileSplit.getPath(0).getName()); assertEquals(0,fileSplit.getOffset(0)); assertEquals(0,fileSplit.getLength(0)); fileSys.delete(file.getParent(),true); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * Test when input files are from non-default file systems */ @Test public void testForNonDefaultFileSystem() throws Throwable { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,DUMMY_FS_URI); assertEquals(DUMMY_FS_URI,FileSystem.getDefaultUri(conf).toString()); Path localPath=new Path("testFile1"); FileSystem lfs=FileSystem.getLocal(conf); FSDataOutputStream dos=lfs.create(localPath); dos.writeChars("Local file for CFIF"); dos.close(); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,lfs.makeQualified(localPath)); DummyInputFormat inFormat=new DummyInputFormat(); List splits=inFormat.getSplits(job); assertTrue(splits.size() > 0); for ( InputSplit s : splits) { CombineFileSplit cfs=(CombineFileSplit)s; for ( Path p : cfs.getPaths()) { assertEquals(p.toUri().getScheme(),"file"); } } }

    Class: org.apache.hadoop.mapreduce.lib.input.TestCombineSequenceFileInputFormat

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) public void testFormat() throws IOException, InterruptedException { Job job=Job.getInstance(conf); Random random=new Random(); long seed=random.nextLong(); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random,job); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); InputFormat format=new CombineSequenceFileInputFormat(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / (SequenceFile.SYNC_INTERVAL / 20)) + 1; LOG.info("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.info("splitting: got = " + splits.size()); assertEquals("We got more than one splits!",1,splits.size()); InputSplit split=splits.get(0); assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); RecordReader reader=format.createRecordReader(split,context); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split); reader.initialize(split,mcontext); assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass()); try { while (reader.nextKeyValue()) { IntWritable key=reader.getCurrentKey(); BytesWritable value=reader.getCurrentValue(); assertNotNull("Value should not be null.",value); final int k=key.get(); LOG.debug("read " + k); assertFalse("Key in multiple partitions.",bits.get(k)); bits.set(k); } } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

    Class: org.apache.hadoop.mapreduce.lib.input.TestCombineTextInputFormat

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) public void testFormat() throws Exception { Job job=Job.getInstance(new Configuration(defaultConf)); Random random=new Random(); long seed=random.nextLong(); LOG.info("seed = " + seed); random.setSeed(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int length=10000; final int numFiles=10; createFiles(length,numFiles,random); CombineTextInputFormat format=new CombineTextInputFormat(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(length / 20) + 1; LOG.info("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.info("splitting: got = " + splits.size()); assertEquals("We got more than one splits!",1,splits.size()); InputSplit split=splits.get(0); assertEquals("It should be CombineFileSplit",CombineFileSplit.class,split.getClass()); BitSet bits=new BitSet(length); LOG.debug("split= " + split); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); RecordReader reader=format.createRecordReader(split,context); assertEquals("reader class is CombineFileRecordReader.",CombineFileRecordReader.class,reader.getClass()); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),split); reader.initialize(split,mcontext); try { int count=0; while (reader.nextKeyValue()) { LongWritable key=reader.getCurrentKey(); assertNotNull("Key should not be null.",key); Text value=reader.getCurrentValue(); final int v=Integer.parseInt(value.toString()); LOG.debug("read " + v); assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.debug("split=" + split + " count="+ count); } finally { reader.close(); } assertEquals("Some keys in no partition.",length,bits.cardinality()); } }

    APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** * Test using the gzip codec for reading */ @Test(timeout=10000) public void testGzip() throws IOException, InterruptedException { Configuration conf=new Configuration(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,conf); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"the quick\nbrown\nfox jumped\nover\n the lazy\n dog\n"); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"this is a test\nof gzip\n"); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,workDir); CombineTextInputFormat format=new CombineTextInputFormat(); List splits=format.getSplits(job); assertEquals("compressed splits == 1",1,splits.size()); List results=readSplit(format,splits.get(0),job); assertEquals("splits[0] length",8,results.size()); final String[] firstList={"the quick","brown","fox jumped","over"," the lazy"," dog"}; final String[] secondList={"this is a test","of gzip"}; String first=results.get(0).toString(); if (first.equals(firstList[0])) { testResults(results,firstList,secondList); } else if (first.equals(secondList[0])) { testResults(results,secondList,firstList); } else { fail("unexpected first token!"); } }

    Class: org.apache.hadoop.mapreduce.lib.input.TestFileInputFormat

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testNumInputFilesWithoutRecursively() throws Exception { Configuration conf=getConfiguration(); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); Job job=Job.getInstance(conf); FileInputFormat fileInputFormat=new TextInputFormat(); List splits=fileInputFormat.getSplits(job); Assert.assertEquals("Input splits are not correct",2,splits.size()); verifySplits(Lists.newArrayList("test:/a1/a2","test:/a1/file1"),splits); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testListStatusErrorOnNonExistantDir() throws IOException { Configuration conf=new Configuration(); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); configureTestErrorOnNonExistantDir(conf,localFs); Job job=Job.getInstance(conf); FileInputFormat fif=new TextInputFormat(); try { fif.listStatus(job); Assert.fail("Expecting an IOException for a missing Input path"); } catch ( IOException e) { Path expectedExceptionPath=new Path(TEST_ROOT_DIR,"input2"); expectedExceptionPath=localFs.makeQualified(expectedExceptionPath); Assert.assertTrue(e instanceof InvalidInputException); Assert.assertEquals("Input path does not exist: " + expectedExceptionPath.toString(),e.getMessage()); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testNumInputFilesRecursively() throws Exception { Configuration conf=getConfiguration(); conf.set(FileInputFormat.INPUT_DIR_RECURSIVE,"true"); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); Job job=Job.getInstance(conf); FileInputFormat fileInputFormat=new TextInputFormat(); List splits=fileInputFormat.getSplits(job); Assert.assertEquals("Input splits are not correct",3,splits.size()); verifySplits(Lists.newArrayList("test:/a1/a2/file2","test:/a1/a2/file3","test:/a1/file1"),splits); conf=getConfiguration(); conf.set("mapred.input.dir.recursive","true"); job=Job.getInstance(conf); splits=fileInputFormat.getSplits(job); verifySplits(Lists.newArrayList("test:/a1/a2/file2","test:/a1/a2/file3","test:/a1/file1"),splits); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testListLocatedStatus() throws Exception { Configuration conf=getConfiguration(); conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS,numThreads); conf.setBoolean("fs.test.impl.disable.cache",false); conf.set(FileInputFormat.INPUT_DIR,"test:///a1/a2"); MockFileSystem mockFs=(MockFileSystem)new Path("test:///").getFileSystem(conf); Assert.assertEquals("listLocatedStatus already called",0,mockFs.numListLocatedStatusCalls); Job job=Job.getInstance(conf); FileInputFormat fileInputFormat=new TextInputFormat(); List splits=fileInputFormat.getSplits(job); Assert.assertEquals("Input splits are not correct",2,splits.size()); Assert.assertEquals("listLocatedStatuss calls",1,mockFs.numListLocatedStatusCalls); FileSystem.closeAll(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testSplitLocationInfo() throws Exception { Configuration conf=getConfiguration(); conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,"test:///a1/a2"); Job job=Job.getInstance(conf); TextInputFormat fileInputFormat=new TextInputFormat(); List splits=fileInputFormat.getSplits(job); String[] locations=splits.get(0).getLocations(); Assert.assertEquals(2,locations.length); SplitLocationInfo[] locationInfo=splits.get(0).getLocationInfo(); Assert.assertEquals(2,locationInfo.length); SplitLocationInfo localhostInfo=locations[0].equals("localhost") ? locationInfo[0] : locationInfo[1]; SplitLocationInfo otherhostInfo=locations[0].equals("otherhost") ? locationInfo[0] : locationInfo[1]; Assert.assertTrue(localhostInfo.isOnDisk()); Assert.assertTrue(localhostInfo.isInMemory()); Assert.assertTrue(otherhostInfo.isOnDisk()); Assert.assertFalse(otherhostInfo.isInMemory()); }

    Class: org.apache.hadoop.mapreduce.lib.input.TestFixedLengthInputFormat

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * Test using the gzip codec with two input files. */ @Test(timeout=5000) public void testGzipWithTwoInputs() throws Exception { CompressionCodec gzip=new GzipCodec(); localFs.delete(workDir,true); Job job=Job.getInstance(defaultConf); FixedLengthInputFormat format=new FixedLengthInputFormat(); format.setRecordLength(job.getConfiguration(),5); ReflectionUtils.setConf(gzip,job.getConfiguration()); FileInputFormat.setInputPaths(job,workDir); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"one two threefour five six seveneightnine ten "); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"ten nine eightsevensix five four threetwo one "); List splits=format.getSplits(job); assertEquals("compressed splits == 2",2,splits.size()); FileSplit tmp=(FileSplit)splits.get(0); if (tmp.getPath().getName().equals("part2.txt.gz")) { splits.set(0,splits.get(1)); splits.set(1,tmp); } List results=readSplit(format,splits.get(0),job); assertEquals("splits[0] length",10,results.size()); assertEquals("splits[0][5]","six ",results.get(5)); results=readSplit(format,splits.get(1),job); assertEquals("splits[1] length",10,results.size()); assertEquals("splits[1][0]","ten ",results.get(0)); assertEquals("splits[1][1]","nine ",results.get(1)); }

    Class: org.apache.hadoop.mapreduce.lib.input.TestLineRecordReaderJobs

    InternalCallVerifier EqualityVerifier 
    /** * Test the case when a custom record delimiter is specified using the * textinputformat.record.delimiter configuration property * @throws IOException * @throws InterruptedException * @throws ClassNotFoundException */ @Test public void testCustomRecordDelimiters() throws IOException, InterruptedException, ClassNotFoundException { Configuration conf=new Configuration(); conf.set("textinputformat.record.delimiter","\t\n"); FileSystem localFs=FileSystem.getLocal(conf); localFs.delete(workDir,true); createInputFile(conf); createAndRunJob(conf); String expected="0\tabc\ndef\n9\tghi\njkl\n"; assertEquals(expected,readOutputFile(conf)); }

    EqualityVerifier 
    /** * Test the default behavior when the textinputformat.record.delimiter * configuration property is not specified * @throws IOException * @throws InterruptedException * @throws ClassNotFoundException */ @Test public void testDefaultRecordDelimiters() throws IOException, InterruptedException, ClassNotFoundException { Configuration conf=new Configuration(); FileSystem localFs=FileSystem.getLocal(conf); localFs.delete(workDir,true); createInputFile(conf); createAndRunJob(conf); String expected="0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n"; assertEquals(expected,readOutputFile(conf)); }

    Class: org.apache.hadoop.mapreduce.lib.input.TestMRCJCFileInputFormat

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    @Test @SuppressWarnings({"rawtypes","unchecked"}) public void testLastInputSplitAtSplitBoundary() throws Exception { FileInputFormat fif=new FileInputFormatForTest(1024l * 1024 * 1024,128l * 1024 * 1024); Configuration conf=new Configuration(); JobContext jobContext=mock(JobContext.class); when(jobContext.getConfiguration()).thenReturn(conf); List splits=fif.getSplits(jobContext); assertEquals(8,splits.size()); for (int i=0; i < splits.size(); i++) { InputSplit split=splits.get(i); assertEquals(("host" + i),split.getLocations()[0]); } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    @Test @SuppressWarnings({"rawtypes","unchecked"}) public void testLastInputSplitExceedingSplitBoundary() throws Exception { FileInputFormat fif=new FileInputFormatForTest(1027l * 1024 * 1024,128l * 1024 * 1024); Configuration conf=new Configuration(); JobContext jobContext=mock(JobContext.class); when(jobContext.getConfiguration()).thenReturn(conf); List splits=fif.getSplits(jobContext); assertEquals(8,splits.size()); for (int i=0; i < splits.size(); i++) { InputSplit split=splits.get(i); assertEquals(("host" + i),split.getLocations()[0]); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * Test when the input file's length is 0. */ @Test public void testForEmptyFile() throws Exception { Configuration conf=new Configuration(); FileSystem fileSys=FileSystem.get(conf); Path file=new Path("test" + "/file"); FSDataOutputStream out=fileSys.create(file,true,conf.getInt("io.file.buffer.size",4096),(short)1,(long)1024); out.write(new byte[0]); out.close(); DummyInputFormat inFormat=new DummyInputFormat(); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,"test"); List splits=inFormat.getSplits(job); assertEquals(1,splits.size()); FileSplit fileSplit=(FileSplit)splits.get(0); assertEquals(0,fileSplit.getLocations().length); assertEquals(file.getName(),fileSplit.getPath().getName()); assertEquals(0,fileSplit.getStart()); assertEquals(0,fileSplit.getLength()); fileSys.delete(file.getParent(),true); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testAddInputPath() throws IOException { final Configuration conf=new Configuration(); conf.set("fs.defaultFS","s3://abc:xyz@hostname/"); final Job j=Job.getInstance(conf); j.getConfiguration().set("fs.defaultFS","s3://abc:xyz@hostname/"); final FileSystem defaultfs=FileSystem.get(conf); System.out.println("defaultfs.getUri() = " + defaultfs.getUri()); { final Path original=new Path("file:/foo"); System.out.println("original = " + original); FileInputFormat.addInputPath(j,original); final Path[] results=FileInputFormat.getInputPaths(j); System.out.println("results = " + Arrays.asList(results)); assertEquals(1,results.length); assertEquals(original,results[0]); } { final Path original=new Path("file:/bar"); System.out.println("original = " + original); FileInputFormat.setInputPaths(j,original); final Path[] results=FileInputFormat.getInputPaths(j); System.out.println("results = " + Arrays.asList(results)); assertEquals(1,results.length); assertEquals(original,results[0]); } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A 100MB file with a 128MB split size fits in a single split; that one
     * split must report "host0" as its first location.
     */
    @Test
    @SuppressWarnings({"rawtypes", "unchecked"})
    public void testLastInputSplitSingleSplit() throws Exception {
        FileInputFormat format = new FileInputFormatForTest(100l * 1024 * 1024, 128l * 1024 * 1024);
        Configuration configuration = new Configuration();
        JobContext context = mock(JobContext.class);
        when(context.getConfiguration()).thenReturn(configuration);
        List computedSplits = format.getSplits(context);
        assertEquals(1, computedSplits.size());
        for (int idx = 0; idx < computedSplits.size(); idx++) {
            InputSplit current = (InputSplit) computedSplits.get(idx);
            assertEquals(("host" + idx), current.getLocations()[0]);
        }
    }

    Class: org.apache.hadoop.mapreduce.lib.input.TestMRKeyValueTextInputFormat

    InternalCallVerifier EqualityVerifier 
    /**
     * LineReader must accept \n, \r, and \r\n as line terminators and report
     * each line's byte length via {@code out.getLength()}.
     *
     * Fix: the assertion for the sixth line ("eeeee") reused the message
     * "line5 length" (copy-paste defect); it now reads "line6 length".
     */
    @Test
    public void testNewLines() throws Exception {
        LineReader in = makeStream("a\nbb\n\nccc\rdddd\r\neeeee");
        Text out = new Text();
        in.readLine(out);
        assertEquals("line1 length", 1, out.getLength());
        in.readLine(out);
        assertEquals("line2 length", 2, out.getLength());
        in.readLine(out);
        // the empty line between "bb" and "ccc"
        assertEquals("line3 length", 0, out.getLength());
        in.readLine(out);
        assertEquals("line4 length", 3, out.getLength());
        in.readLine(out);
        assertEquals("line5 length", 4, out.getLength());
        in.readLine(out);
        assertEquals("line6 length", 5, out.getLength());
        // readLine returns 0 bytes consumed once the stream is exhausted
        assertEquals("end of file", 0, in.readLine(out));
    }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that KeyValueTextInputFormat treats bzip2-compressed input as
     * splittable, and that across randomly sized inputs and three rounds of
     * split generation every record (key = 2*i, value = i) is read exactly
     * once over all splits (membership tracked in the BitSet, totals checked
     * via its cardinality).
     * NOTE(review): numSplits is computed each round but never passed to
     * getSplits(); the requested value only appears in the log line.
     */
    @Test public void testSplitableCodecs() throws Exception { final Job job=Job.getInstance(defaultConf); final Configuration conf=job.getConfiguration(); CompressionCodec codec=null; try { codec=(CompressionCodec)ReflectionUtils.newInstance(conf.getClassByName("org.apache.hadoop.io.compress.BZip2Codec"),conf); } catch ( ClassNotFoundException cnfe) { throw new IOException("Illegal codec!"); } Path file=new Path(workDir,"test" + codec.getDefaultExtension()); int seed=new Random().nextInt(); LOG.info("seed = " + seed); Random random=new Random(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int MAX_LENGTH=500000; FileInputFormat.setMaxInputSplitSize(job,MAX_LENGTH / 20); for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 4) + 1) { LOG.info("creating; entries = " + length); Writer writer=new OutputStreamWriter(codec.createOutputStream(localFs.create(file))); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i * 2)); writer.write("\t"); writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } KeyValueTextInputFormat format=new KeyValueTextInputFormat(); assertTrue("KVTIF claims not splittable",format.isSplitable(job,file)); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 2000) + 1; LOG.info("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.info("splitting: got = " + splits.size()); BitSet bits=new BitSet(length); for (int j=0; j < splits.size(); j++) { LOG.debug("split[" + j + "]= "+ splits.get(j)); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); RecordReader reader=format.createRecordReader(splits.get(j),context); Class clazz=reader.getClass(); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),splits.get(j)); reader.initialize(splits.get(j),mcontext); Text 
key=null; Text value=null; try { int count=0; while (reader.nextKeyValue()) { key=reader.getCurrentKey(); value=reader.getCurrentValue(); final int k=Integer.parseInt(key.toString()); final int v=Integer.parseInt(value.toString()); assertEquals("Bad key",0,k % 2); assertEquals("Mismatched key/value",k / 2,v); LOG.debug("read " + k + ","+ v); assertFalse(k + "," + v+ " in multiple partitions.",bits.get(v)); bits.set(v); count++; } if (count > 0) { LOG.info("splits[" + j + "]="+ splits.get(j)+ " count="+ count); } else { LOG.debug("splits[" + j + "]="+ splits.get(j)+ " count="+ count); } } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Reads two gzip-compressed files with KeyValueTextInputFormat. Gzip is
     * not splittable, so exactly one split per file is expected; the splits
     * are swapped into a deterministic order by file name before asserting
     * the tab-separated values record by record.
     */
    @Test public void testGzip() throws IOException, InterruptedException { Configuration conf=new Configuration(defaultConf); CompressionCodec gzip=new GzipCodec(); ReflectionUtils.setConf(gzip,conf); localFs.delete(workDir,true); writeFile(localFs,new Path(workDir,"part1.txt.gz"),gzip,"line-1\tthe quick\nline-2\tbrown\nline-3\t" + "fox jumped\nline-4\tover\nline-5\t the lazy\nline-6\t dog\n"); writeFile(localFs,new Path(workDir,"part2.txt.gz"),gzip,"line-1\tthis is a test\nline-1\tof gzip\n"); Job job=Job.getInstance(conf); FileInputFormat.setInputPaths(job,workDir); KeyValueTextInputFormat format=new KeyValueTextInputFormat(); List splits=format.getSplits(job); assertEquals("compressed splits == 2",2,splits.size()); FileSplit tmp=(FileSplit)splits.get(0); if (tmp.getPath().getName().equals("part2.txt.gz")) { splits.set(0,splits.get(1)); splits.set(1,tmp); } List results=readSplit(format,splits.get(0),job); assertEquals("splits[0] length",6,results.size()); assertEquals("splits[0][0]","the quick",results.get(0).toString()); assertEquals("splits[0][1]","brown",results.get(1).toString()); assertEquals("splits[0][2]","fox jumped",results.get(2).toString()); assertEquals("splits[0][3]","over",results.get(3).toString()); assertEquals("splits[0][4]"," the lazy",results.get(4).toString()); assertEquals("splits[0][5]"," dog",results.get(5).toString()); results=readSplit(format,splits.get(1),job); assertEquals("splits[1] length",2,results.size()); assertEquals("splits[1][0]","this is a test",results.get(0).toString()); assertEquals("splits[1][1]","of gzip",results.get(1).toString()); }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end check of KeyValueTextInputFormat over uncompressed text of
     * random lengths: the created reader must be a KeyValueLineRecordReader,
     * keys and values must both be Text, and each record (key = 2*i,
     * value = i) must land in exactly one split — verified via the BitSet
     * and its final cardinality check.
     * NOTE(review): as in testSplitableCodecs, numSplits is computed but
     * never passed to getSplits(); it is only logged.
     */
    @Test public void testFormat() throws Exception { Job job=Job.getInstance(new Configuration(defaultConf)); Path file=new Path(workDir,"test.txt"); int seed=new Random().nextInt(); LOG.info("seed = " + seed); Random random=new Random(seed); localFs.delete(workDir,true); FileInputFormat.setInputPaths(job,workDir); final int MAX_LENGTH=10000; for (int length=0; length < MAX_LENGTH; length+=random.nextInt(MAX_LENGTH / 10) + 1) { LOG.debug("creating; entries = " + length); Writer writer=new OutputStreamWriter(localFs.create(file)); try { for (int i=0; i < length; i++) { writer.write(Integer.toString(i * 2)); writer.write("\t"); writer.write(Integer.toString(i)); writer.write("\n"); } } finally { writer.close(); } KeyValueTextInputFormat format=new KeyValueTextInputFormat(); for (int i=0; i < 3; i++) { int numSplits=random.nextInt(MAX_LENGTH / 20) + 1; LOG.debug("splitting: requesting = " + numSplits); List splits=format.getSplits(job); LOG.debug("splitting: got = " + splits.size()); BitSet bits=new BitSet(length); for (int j=0; j < splits.size(); j++) { LOG.debug("split[" + j + "]= "+ splits.get(j)); TaskAttemptContext context=MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration()); RecordReader reader=format.createRecordReader(splits.get(j),context); Class clazz=reader.getClass(); assertEquals("reader class is KeyValueLineRecordReader.",KeyValueLineRecordReader.class,clazz); MapContext mcontext=new MapContextImpl(job.getConfiguration(),context.getTaskAttemptID(),reader,null,null,MapReduceTestUtil.createDummyReporter(),splits.get(j)); reader.initialize(splits.get(j),mcontext); Text key=null; Text value=null; try { int count=0; while (reader.nextKeyValue()) { key=reader.getCurrentKey(); clazz=key.getClass(); assertEquals("Key class is Text.",Text.class,clazz); value=reader.getCurrentValue(); clazz=value.getClass(); assertEquals("Value class is Text.",Text.class,clazz); final int k=Integer.parseInt(key.toString()); final int 
v=Integer.parseInt(value.toString()); assertEquals("Bad key",0,k % 2); assertEquals("Mismatched key/value",k / 2,v); LOG.debug("read " + v); assertFalse("Key in multiple partitions.",bits.get(v)); bits.set(v); count++; } LOG.debug("splits[" + j + "]="+ splits.get(j)+ " count="+ count); } finally { reader.close(); } } assertEquals("Some keys in no partition.",length,bits.cardinality()); } } }

    InternalCallVerifier EqualityVerifier 
    /**
     * readLine must pass multi-byte UTF-8 characters through unchanged and
     * must not treat U+200A (hair space) as a line terminator.
     */
    @Test
    public void testUTF8() throws Exception {
        LineReader reader = makeStream("abcd\u20acbdcd\u20ac");
        Text text = new Text();
        reader.readLine(text);
        assertEquals("readLine changed utf8 characters", "abcd\u20acbdcd\u20ac", text.toString());
        reader = makeStream("abc\u200axyz");
        reader.readLine(text);
        assertEquals("split on fake newline", "abc\u200axyz", text.toString());
    }

    Class: org.apache.hadoop.mapreduce.lib.jobcontrol.TestMapReduceJobControl

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Once a controlled job reaches RUNNING, its mapred job id must be
     * non-null, and all dependent jobs must eventually finish without
     * failures.
     */
    @Test(timeout = 30000)
    public void testControlledJob() throws Exception {
        LOG.info("Starting testControlledJob");
        Configuration conf = createJobConf();
        cleanupData(conf);
        Job copyJob = MapReduceTestUtil.createCopyJob(conf, outdir_1, indir);
        JobControl control = createDependencies(conf, copyJob);
        // Spin until the first controlled job is RUNNING so its id exists.
        while (cjob1.getJobState() != ControlledJob.State.RUNNING) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                break;
            }
        }
        Assert.assertNotNull(cjob1.getMapredJobId());
        waitTillAllFinished(control);
        assertEquals("Some jobs failed", 0, control.getFailedJobList().size());
        control.stop();
    }

    Class: org.apache.hadoop.mapreduce.lib.jobcontrol.TestMapReduceJobControlWithMocks

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Four mocked jobs in a diamond dependency (third depends on first and
     * second, fourth on third) must all end in SUCCESS with an empty failed
     * list.
     */
    @Test
    public void testSuccessfulJobs() throws Exception {
        JobControl control = new JobControl("Test");
        ControlledJob first = createSuccessfulControlledJob(control);
        ControlledJob second = createSuccessfulControlledJob(control);
        ControlledJob third = createSuccessfulControlledJob(control, first, second);
        ControlledJob fourth = createSuccessfulControlledJob(control, third);
        runJobControl(control);
        assertEquals("Success list", 4, control.getSuccessfulJobList().size());
        assertEquals("Failed list", 0, control.getFailedJobList().size());
        assertTrue(first.getJobState() == ControlledJob.State.SUCCESS);
        assertTrue(second.getJobState() == ControlledJob.State.SUCCESS);
        assertTrue(third.getJobState() == ControlledJob.State.SUCCESS);
        assertTrue(fourth.getJobState() == ControlledJob.State.SUCCESS);
        control.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A job whose submit() throws an Error (not just an Exception) must be
     * moved to the failed list rather than hanging or crashing the control
     * thread.
     */
    @Test
    public void testErrorWhileSubmitting() throws Exception {
        JobControl control = new JobControl("Test");
        Job mockJob = mock(Job.class);
        ControlledJob controlled = new ControlledJob(mockJob, null);
        when(mockJob.getConfiguration()).thenReturn(new Configuration());
        // submit() throws an Error to exercise the non-Exception path.
        doThrow(new IncompatibleClassChangeError("This is a test")).when(mockJob).submit();
        control.addJob(controlled);
        runJobControl(control);
        try {
            assertEquals("Success list", 0, control.getSuccessfulJobList().size());
            assertEquals("Failed list", 1, control.getFailedJobList().size());
            assertTrue(controlled.getJobState() == ControlledJob.State.FAILED);
        } finally {
            control.stop();
        }
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * When the first job of a diamond dependency fails, its dependents must
     * be marked DEPENDENT_FAILED while the independent second job still
     * succeeds.
     */
    @Test
    public void testFailedJob() throws Exception {
        JobControl control = new JobControl("Test");
        ControlledJob first = createFailedControlledJob(control);
        ControlledJob second = createSuccessfulControlledJob(control);
        ControlledJob third = createSuccessfulControlledJob(control, first, second);
        ControlledJob fourth = createSuccessfulControlledJob(control, third);
        runJobControl(control);
        assertEquals("Success list", 1, control.getSuccessfulJobList().size());
        assertEquals("Failed list", 3, control.getFailedJobList().size());
        assertTrue(first.getJobState() == ControlledJob.State.FAILED);
        assertTrue(second.getJobState() == ControlledJob.State.SUCCESS);
        assertTrue(third.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
        assertTrue(fourth.getJobState() == ControlledJob.State.DEPENDENT_FAILED);
        control.stop();
    }

    Class: org.apache.hadoop.mapreduce.lib.partition.TestInputSampler

    APIUtilityVerifier IterativeVerifier EqualityVerifier 
    /**
     * Verify the IntervalSampler contract: samples are taken at regular
     * intervals from the given splits, so sorting the collected samples must
     * yield the consecutive integers 0..NUM_SAMPLES-1.
     */
    @Test
    @SuppressWarnings("unchecked")
    public void testIntervalSampler() throws Exception {
        final int TOT_SPLITS = 16;
        final int PER_SPLIT_SAMPLE = 4;
        final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
        final double FREQ = 1.0 / TOT_SPLITS;
        InputSampler.Sampler sampler = new InputSampler.IntervalSampler(FREQ, NUM_SAMPLES);
        int[] initSeeds = new int[TOT_SPLITS];
        for (int s = 0; s < TOT_SPLITS; ++s) {
            initSeeds[s] = s;
        }
        Job ignored = Job.getInstance();
        Object[] samples =
            sampler.getSample(new TestInputSamplerIF(NUM_SAMPLES, TOT_SPLITS, initSeeds), ignored);
        assertEquals(NUM_SAMPLES, samples.length);
        Arrays.sort(samples, new IntWritable.Comparator());
        for (int s = 0; s < NUM_SAMPLES; ++s) {
            assertEquals(s, ((IntWritable) samples[s]).get());
        }
    }

    APIUtilityVerifier IterativeVerifier EqualityVerifier 
    /**
     * Verify IntervalSampler in mapred.lib.InputSampler, the variant kept
     * for M/R 1.x binary compatibility: same contract as the mapreduce-side
     * sampler — sorted samples are the consecutive integers 0..NUM_SAMPLES-1.
     */
    @Test(timeout = 30000)
    @SuppressWarnings("unchecked")
    public void testMapredIntervalSampler() throws Exception {
        final int TOT_SPLITS = 16;
        final int PER_SPLIT_SAMPLE = 4;
        final int NUM_SAMPLES = TOT_SPLITS * PER_SPLIT_SAMPLE;
        final double FREQ = 1.0 / TOT_SPLITS;
        org.apache.hadoop.mapred.lib.InputSampler.Sampler sampler =
            new org.apache.hadoop.mapred.lib.InputSampler.IntervalSampler(FREQ, NUM_SAMPLES);
        int[] initSeeds = new int[TOT_SPLITS];
        for (int s = 0; s < TOT_SPLITS; ++s) {
            initSeeds[s] = s;
        }
        Job ignored = Job.getInstance();
        Object[] samples =
            sampler.getSample(new TestInputSamplerIF(NUM_SAMPLES, TOT_SPLITS, initSeeds), ignored);
        assertEquals(NUM_SAMPLES, samples.length);
        Arrays.sort(samples, new IntWritable.Comparator());
        for (int s = 0; s < NUM_SAMPLES; ++s) {
            assertEquals(s, ((IntWritable) samples[s]).get());
        }
    }

    APIUtilityVerifier IterativeVerifier EqualityVerifier 
    /**
     * Verify the SplitSampler contract: an equal number of records is taken
     * from only the first NUM_SPLITS splits, so the sorted samples are the
     * consecutive integers 0..NUM_SAMPLES-1.
     */
    @Test
    @SuppressWarnings("unchecked")
    public void testSplitSampler() throws Exception {
        final int TOT_SPLITS = 15;
        final int NUM_SPLITS = 5;
        final int STEP_SAMPLE = 5;
        final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
        InputSampler.Sampler sampler = new InputSampler.SplitSampler(NUM_SAMPLES, NUM_SPLITS);
        int[] initSeeds = new int[TOT_SPLITS];
        for (int s = 0; s < TOT_SPLITS; ++s) {
            initSeeds[s] = s * STEP_SAMPLE;
        }
        Job ignored = Job.getInstance();
        Object[] samples =
            sampler.getSample(new TestInputSamplerIF(100000, TOT_SPLITS, initSeeds), ignored);
        assertEquals(NUM_SAMPLES, samples.length);
        Arrays.sort(samples, new IntWritable.Comparator());
        for (int s = 0; s < NUM_SAMPLES; ++s) {
            assertEquals(s, ((IntWritable) samples[s]).get());
        }
    }

    IterativeVerifier EqualityVerifier 
    /**
     * Verify the SplitSampler contract in mapred.lib.InputSampler (kept for
     * M/R 1.x binary compatibility). The expected i-th sorted sample is
     * i % STEP_SAMPLE + TOT_SPLITS * (i / STEP_SAMPLE), reflecting how the
     * old-API sampler walks the splits.
     */
    @Test(timeout = 30000)
    @SuppressWarnings("unchecked")
    public void testMapredSplitSampler() throws Exception {
        final int TOT_SPLITS = 15;
        final int NUM_SPLITS = 5;
        final int STEP_SAMPLE = 5;
        final int NUM_SAMPLES = NUM_SPLITS * STEP_SAMPLE;
        org.apache.hadoop.mapred.lib.InputSampler.Sampler sampler =
            new org.apache.hadoop.mapred.lib.InputSampler.SplitSampler(NUM_SAMPLES, NUM_SPLITS);
        int[] initSeeds = new int[TOT_SPLITS];
        for (int s = 0; s < TOT_SPLITS; ++s) {
            initSeeds[s] = s * STEP_SAMPLE;
        }
        Object[] samples =
            sampler.getSample(new TestMapredInputSamplerIF(100000, TOT_SPLITS, initSeeds), new JobConf());
        assertEquals(NUM_SAMPLES, samples.length);
        Arrays.sort(samples, new IntWritable.Comparator());
        for (int s = 0; s < NUM_SAMPLES; ++s) {
            assertEquals(s % STEP_SAMPLE + TOT_SPLITS * (s / STEP_SAMPLE),
                ((IntWritable) samples[s]).get());
        }
    }

    Class: org.apache.hadoop.mapreduce.security.TestBinaryTokenFile

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Run a small distributed sleep job and verify that the TokenCache is
     * available to it: the job must complete with exit code 0.
     * @throws IOException on cluster access failure
     */
    @Test
    public void testBinaryTokenFile() throws IOException {
        Configuration conf = mrCluster.getConfig();
        final String nnUri = dfsCluster.getURI(0).toString();
        conf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
        final String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
        int exitCode = -1;
        try {
            exitCode = ToolRunner.run(conf, new MySleepJob(), args);
        } catch (Exception e) {
            System.out.println("Job failed with " + e.getLocalizedMessage());
            e.printStackTrace(System.out);
            fail("Job failed");
        }
        assertEquals("dist job res is not 0:", 0, exitCode);
    }

    Class: org.apache.hadoop.mapreduce.security.TestJHSSecurity

    UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the JobHistoryServer delegation-token lifecycle against an
     * in-process server with short token intervals (10s initial/renew, 20s
     * max lifetime): obtain a token and use it for getJobReport; renew it
     * past the initial expiry; wait for the renewed token to expire (expect
     * a cause message containing "is expired"); fetch a fresh token and use
     * it; cancel tokens (including one with a different renewer, "yarn");
     * and finally verify a cancelled token is rejected.
     * NOTE(review): the early getJobReport calls assert the
     * "Unknown job job_123456_0001" message only inside the catch block —
     * if no IOException is thrown those checks are silently skipped.
     * NOTE(review): the final catch after cancellation is intentionally
     * empty; any IOException there counts as the expected rejection.
     */
    @Test public void testDelegationToken() throws IOException, InterruptedException { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); final YarnConfiguration conf=new YarnConfiguration(new JobConf()); conf.set(JHAdminConfig.MR_HISTORY_PRINCIPAL,"RandomOrc/localhost@apache.org"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); final long initialInterval=10000l; final long maxLifetime=20000l; final long renewInterval=10000l; JobHistoryServer jobHistoryServer=null; MRClientProtocol clientUsingDT=null; long tokenFetchTime; try { jobHistoryServer=new JobHistoryServer(){ protected void doSecureLogin( Configuration conf) throws IOException { } @Override protected JHSDelegationTokenSecretManager createJHSSecretManager( Configuration conf, HistoryServerStateStoreService store){ return new JHSDelegationTokenSecretManager(initialInterval,maxLifetime,renewInterval,3600000,store); } @Override protected HistoryClientService createHistoryClientService(){ return new HistoryClientService(historyContext,this.jhsDTSecretManager){ @Override protected void initializeWebApp( Configuration conf){ } } ; } } ; jobHistoryServer.init(conf); jobHistoryServer.start(); final MRClientProtocol hsService=jobHistoryServer.getClientService().getClientHandler(); UserGroupInformation loggedInUser=UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG"); Assert.assertEquals("testrenewer",loggedInUser.getShortUserName()); loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS); Token token=getDelegationToken(loggedInUser,hsService,loggedInUser.getShortUserName()); tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"TheDarkLord",conf); GetJobReportRequest jobReportRequest=Records.newRecord(GetJobReportRequest.class); 
jobReportRequest.setJobId(MRBuilderUtils.newJobId(123456,1,1)); try { clientUsingDT.getJobReport(jobReportRequest); } catch ( IOException e) { Assert.assertEquals("Unknown job job_123456_0001",e.getMessage()); } while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) { Thread.sleep(500l); } long nextExpTime=renewDelegationToken(loggedInUser,hsService,token); long renewalTime=System.currentTimeMillis(); LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "+ nextExpTime); while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) { Thread.sleep(500l); } Thread.sleep(50l); try { clientUsingDT.getJobReport(jobReportRequest); } catch ( IOException e) { Assert.assertEquals("Unknown job job_123456_0001",e.getMessage()); } while (System.currentTimeMillis() < renewalTime + renewInterval) { Thread.sleep(500l); } Thread.sleep(50l); LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid"); try { clientUsingDT.getJobReport(jobReportRequest); fail("Should not have succeeded with an expired token"); } catch ( IOException e) { assertTrue(e.getCause().getMessage().contains("is expired")); } if (clientUsingDT != null) { clientUsingDT=null; } token=getDelegationToken(loggedInUser,hsService,loggedInUser.getShortUserName()); tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"loginuser2",conf); try { clientUsingDT.getJobReport(jobReportRequest); } catch ( IOException e) { fail("Unexpected exception" + e); } cancelDelegationToken(loggedInUser,hsService,token); Token tokenWithDifferentRenewer=getDelegationToken(loggedInUser,hsService,"yarn"); cancelDelegationToken(loggedInUser,hsService,tokenWithDifferentRenewer); if (clientUsingDT != null) { clientUsingDT=null; } 
clientUsingDT=getMRClientProtocol(token,jobHistoryServer.getClientService().getBindAddress(),"loginuser2",conf); LOG.info("Cancelled delegation token at: " + System.currentTimeMillis()); try { clientUsingDT.getJobReport(jobReportRequest); fail("Should not have succeeded with a cancelled delegation token"); } catch ( IOException e) { } } finally { jobHistoryServer.stop(); } }

    Class: org.apache.hadoop.mapreduce.security.TestMRCredentials

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Run a distributed job and verify that TokenCache credentials (loaded
     * from "keys.json") are available to it: the job must exit with code 0.
     *
     * Fix: the final assertEquals had its arguments swapped as
     * (message, actual, expected); JUnit's contract is
     * (message, expected, actual), so a failure previously reported the
     * values backwards.
     * @throws IOException on cluster access failure
     */
    @Test
    public void test() throws IOException {
        Configuration jobConf = new JobConf(mrCluster.getConfig());
        NameNode nn = dfsCluster.getNameNode();
        URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
        // Provide the namenode(s) whose delegation tokens the job needs.
        jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());
        jobConf.set("mapreduce.job.credentials.json", "keys.json");
        String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"};
        int res = -1;
        try {
            res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
        } catch (Exception e) {
            System.out.println("Job failed with" + e.getLocalizedMessage());
            e.printStackTrace(System.out);
            fail("Job failed");
        }
        assertEquals("dist job res is not 0", 0, res);
    }

    Class: org.apache.hadoop.mapreduce.security.token.delegation.TestDelegationToken

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies MapReduce delegation-token ownership rules: the token's
     * identifier decodes to user "alice" with issue date before now and max
     * date after now; the issuing user (user1) can renew twice and cancel
     * once; a different user (user2/bob) gets AccessControlException on both
     * renew and cancel; and a second cancel by the owner fails with
     * InvalidToken.
     * NOTE(review): the empty catch blocks for AccessControlException /
     * InvalidToken are intentional — the exception itself is the expected
     * outcome after the preceding Assert.fail(...) calls.
     */
    @SuppressWarnings("deprecation") @Test public void testDelegationToken() throws Exception { final JobClient client; client=user1.doAs(new PrivilegedExceptionAction(){ @Override public JobClient run() throws Exception { return new JobClient(cluster.createJobConf()); } } ); final JobClient bobClient; bobClient=user2.doAs(new PrivilegedExceptionAction(){ @Override public JobClient run() throws Exception { return new JobClient(cluster.createJobConf()); } } ); final Token token=client.getDelegationToken(new Text(user1.getUserName())); DataInputBuffer inBuf=new DataInputBuffer(); byte[] bytes=token.getIdentifier(); inBuf.reset(bytes,bytes.length); DelegationTokenIdentifier ident=new DelegationTokenIdentifier(); ident.readFields(inBuf); assertEquals("alice",ident.getUser().getUserName()); long createTime=ident.getIssueDate(); long maxTime=ident.getMaxDate(); long currentTime=System.currentTimeMillis(); System.out.println("create time: " + createTime); System.out.println("current time: " + currentTime); System.out.println("max time: " + maxTime); assertTrue("createTime < current",createTime < currentTime); assertTrue("current < maxTime",currentTime < maxTime); user1.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { client.renewDelegationToken(token); client.renewDelegationToken(token); return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { bobClient.renewDelegationToken(token); Assert.fail("bob renew"); } catch ( AccessControlException ace) { } return null; } } ); user2.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { try { bobClient.cancelDelegationToken(token); Assert.fail("bob cancel"); } catch ( AccessControlException ace) { } return null; } } ); user1.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { client.cancelDelegationToken(token); try { client.cancelDelegationToken(token); Assert.fail("second 
alice cancel"); } catch ( InvalidToken it) { } return null; } } ); }

    Class: org.apache.hadoop.mapreduce.split.TestJobSplitWriter

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Old-API (mapred) splits with 5 block locations must be truncated to
     * the configured maximum of 4 when written and read back through the
     * split meta-info files.
     */
    @Test
    public void testMaxBlockLocationsOldSplits() throws Exception {
        TEST_DIR.mkdirs();
        try {
            Configuration conf = new Configuration();
            conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
            Path submitDir = new Path(TEST_DIR.getAbsolutePath());
            FileSystem fs = FileSystem.getLocal(conf);
            org.apache.hadoop.mapred.FileSplit split =
                new org.apache.hadoop.mapred.FileSplit(new Path("/some/path"), 0, 1,
                    new String[]{"loc1", "loc2", "loc3", "loc4", "loc5"});
            JobSplitWriter.createSplitFiles(submitDir, conf, fs,
                new org.apache.hadoop.mapred.InputSplit[]{split});
            JobSplit.TaskSplitMetaInfo[] metaInfo =
                SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf, submitDir);
            assertEquals("unexpected number of splits", 1, metaInfo.length);
            assertEquals("unexpected number of split locations", 4, metaInfo[0].getLocations().length);
        } finally {
            FileUtil.fullyDelete(TEST_DIR);
        }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * New-API (mapreduce) splits with 5 block locations must be truncated to
     * the configured maximum of 4 when written and read back through the
     * split meta-info files.
     */
    @Test
    public void testMaxBlockLocationsNewSplits() throws Exception {
        TEST_DIR.mkdirs();
        try {
            Configuration conf = new Configuration();
            conf.setInt(MRConfig.MAX_BLOCK_LOCATIONS_KEY, 4);
            Path submitDir = new Path(TEST_DIR.getAbsolutePath());
            FileSystem fs = FileSystem.getLocal(conf);
            FileSplit split = new FileSplit(new Path("/some/path"), 0, 1,
                new String[]{"loc1", "loc2", "loc3", "loc4", "loc5"});
            JobSplitWriter.createSplitFiles(submitDir, conf, fs, new FileSplit[]{split});
            JobSplit.TaskSplitMetaInfo[] metaInfo =
                SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf, submitDir);
            assertEquals("unexpected number of splits", 1, metaInfo.length);
            assertEquals("unexpected number of split locations", 4, metaInfo[0].getLocations().length);
        } finally {
            FileUtil.fullyDelete(TEST_DIR);
        }
    }

    Class: org.apache.hadoop.mapreduce.task.reduce.TestMergeManager

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Drives MergeManager's in-memory merge path: with a 10000-byte memory
     * budget and 7950-byte outputs, two reservations succeed as
     * InMemoryMapOutput, a third is told to wait (null), and committing the
     * two triggers a merge — synchronized with the merge thread via the
     * mergeStart/mergeComplete barriers. The sequence is repeated once more
     * to confirm a second merge, and finally no exceptions may have been
     * reported.
     * NOTE(review): the statement order around the barriers is what makes
     * this test deterministic; do not reorder the await()/commit() calls.
     */
    @Test(timeout=10000) public void testMemoryMerge() throws Exception { final int TOTAL_MEM_BYTES=10000; final int OUTPUT_SIZE=7950; JobConf conf=new JobConf(); conf.setFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT,1.0f); conf.setLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,TOTAL_MEM_BYTES); conf.setFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT,0.8f); conf.setFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT,0.9f); TestExceptionReporter reporter=new TestExceptionReporter(); CyclicBarrier mergeStart=new CyclicBarrier(2); CyclicBarrier mergeComplete=new CyclicBarrier(2); StubbedMergeManager mgr=new StubbedMergeManager(conf,reporter,mergeStart,mergeComplete); MapOutput out1=mgr.reserve(null,OUTPUT_SIZE,0); Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput)); InMemoryMapOutput mout1=(InMemoryMapOutput)out1; fillOutput(mout1); MapOutput out2=mgr.reserve(null,OUTPUT_SIZE,0); Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput)); InMemoryMapOutput mout2=(InMemoryMapOutput)out2; fillOutput(mout2); MapOutput out3=mgr.reserve(null,OUTPUT_SIZE,0); Assert.assertEquals("Should be told to wait",null,out3); mout1.commit(); mout2.commit(); mergeStart.await(); Assert.assertEquals(1,mgr.getNumMerges()); out1=mgr.reserve(null,OUTPUT_SIZE,0); Assert.assertTrue("Should be a memory merge",(out1 instanceof InMemoryMapOutput)); mout1=(InMemoryMapOutput)out1; fillOutput(mout1); out2=mgr.reserve(null,OUTPUT_SIZE,0); Assert.assertTrue("Should be a memory merge",(out2 instanceof InMemoryMapOutput)); mout2=(InMemoryMapOutput)out2; fillOutput(mout2); out3=mgr.reserve(null,OUTPUT_SIZE,0); Assert.assertEquals("Should be told to wait",null,out3); mout1.commit(); mout2.commit(); mergeComplete.await(); mergeStart.await(); Assert.assertEquals(2,mgr.getNumMerges()); mergeComplete.await(); Assert.assertEquals(2,mgr.getNumMerges()); Assert.assertEquals("exception reporter invoked",0,reporter.getNumExceptions()); }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @SuppressWarnings({"unchecked","deprecation"}) @Test(timeout=10000) public void testOnDiskMerger() throws IOException, URISyntaxException, InterruptedException { JobConf jobConf=new JobConf(); final int SORT_FACTOR=5; jobConf.setInt(MRJobConfig.IO_SORT_FACTOR,SORT_FACTOR); MapOutputFile mapOutputFile=new MROutputFiles(); FileSystem fs=FileSystem.getLocal(jobConf); MergeManagerImpl manager=new MergeManagerImpl(null,jobConf,fs,null,null,null,null,null,null,null,null,null,null,mapOutputFile); MergeThread,IntWritable,IntWritable> onDiskMerger=(MergeThread,IntWritable,IntWritable>)Whitebox.getInternalState(manager,"onDiskMerger"); int mergeFactor=(Integer)Whitebox.getInternalState(onDiskMerger,"mergeFactor"); assertEquals(mergeFactor,SORT_FACTOR); onDiskMerger.suspend(); Random rand=new Random(); for (int i=0; i < 2 * SORT_FACTOR; ++i) { Path path=new Path("somePath"); CompressAwarePath cap=new CompressAwarePath(path,1l,rand.nextInt()); manager.closeOnDiskFile(cap); } LinkedList> pendingToBeMerged=(LinkedList>)Whitebox.getInternalState(onDiskMerger,"pendingToBeMerged"); assertTrue("No inputs were added to list pending to merge",pendingToBeMerged.size() > 0); for (int i=0; i < pendingToBeMerged.size(); ++i) { List inputs=pendingToBeMerged.get(i); for (int j=1; j < inputs.size(); ++j) { assertTrue("Not enough / too many inputs were going to be merged",inputs.size() > 0 && inputs.size() <= SORT_FACTOR); assertTrue("Inputs to be merged were not sorted according to size: ",inputs.get(j).getCompressedSize() >= inputs.get(j - 1).getCompressedSize()); } } }

    Class: org.apache.hadoop.mapreduce.task.reduce.TestMerger

    InternalCallVerifier EqualityVerifier 
    /**
     * Merges two pairs of in-memory map outputs to disk via
     * createInMemoryMerger(), reads back and checks the two on-disk files,
     * then feeds those files to the on-disk merger and verifies the fully
     * merged, key-sorted key/value sequences; finally close() must leave no
     * in-memory or on-disk outputs registered.
     * NOTE(review): map1.put("banana","pretty good") appears right after
     * map2 is declared — possibly intended to be map2.put. The assertions
     * are unaffected either way since the merged contents are identical.
     * NOTE(review): several Assert.assertEquals calls pass (actual,
     * expected); equality is symmetric, so only failure messages would read
     * backwards.
     */
    @Test public void testInMemoryAndOnDiskMerger() throws Throwable { JobID jobId=new JobID("a",0); TaskAttemptID reduceId1=new TaskAttemptID(new TaskID(jobId,TaskType.REDUCE,0),0); TaskAttemptID mapId1=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,1),0); TaskAttemptID mapId2=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,2),0); LocalDirAllocator lda=new LocalDirAllocator(MRConfig.LOCAL_DIR); MergeManagerImpl mergeManager=new MergeManagerImpl(reduceId1,jobConf,fs,lda,Reporter.NULL,null,null,null,null,null,null,null,new Progress(),new MROutputFiles()); Map map1=new TreeMap(); map1.put("apple","disgusting"); map1.put("carrot","delicious"); Map map2=new TreeMap(); map1.put("banana","pretty good"); byte[] mapOutputBytes1=writeMapOutput(conf,map1); byte[] mapOutputBytes2=writeMapOutput(conf,map2); InMemoryMapOutput mapOutput1=new InMemoryMapOutput(conf,mapId1,mergeManager,mapOutputBytes1.length,null,true); InMemoryMapOutput mapOutput2=new InMemoryMapOutput(conf,mapId2,mergeManager,mapOutputBytes2.length,null,true); System.arraycopy(mapOutputBytes1,0,mapOutput1.getMemory(),0,mapOutputBytes1.length); System.arraycopy(mapOutputBytes2,0,mapOutput2.getMemory(),0,mapOutputBytes2.length); MergeThread,Text,Text> inMemoryMerger=mergeManager.createInMemoryMerger(); List> mapOutputs1=new ArrayList>(); mapOutputs1.add(mapOutput1); mapOutputs1.add(mapOutput2); inMemoryMerger.merge(mapOutputs1); Assert.assertEquals(1,mergeManager.onDiskMapOutputs.size()); TaskAttemptID reduceId2=new TaskAttemptID(new TaskID(jobId,TaskType.REDUCE,3),0); TaskAttemptID mapId3=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,4),0); TaskAttemptID mapId4=new TaskAttemptID(new TaskID(jobId,TaskType.MAP,5),0); Map map3=new TreeMap(); map3.put("apple","awesome"); map3.put("carrot","amazing"); Map map4=new TreeMap(); map4.put("banana","bla"); byte[] mapOutputBytes3=writeMapOutput(conf,map3); byte[] mapOutputBytes4=writeMapOutput(conf,map4); InMemoryMapOutput mapOutput3=new 
InMemoryMapOutput(conf,mapId3,mergeManager,mapOutputBytes3.length,null,true); InMemoryMapOutput mapOutput4=new InMemoryMapOutput(conf,mapId4,mergeManager,mapOutputBytes4.length,null,true); System.arraycopy(mapOutputBytes3,0,mapOutput3.getMemory(),0,mapOutputBytes3.length); System.arraycopy(mapOutputBytes4,0,mapOutput4.getMemory(),0,mapOutputBytes4.length); MergeThread,Text,Text> inMemoryMerger2=mergeManager.createInMemoryMerger(); List> mapOutputs2=new ArrayList>(); mapOutputs2.add(mapOutput3); mapOutputs2.add(mapOutput4); inMemoryMerger2.merge(mapOutputs2); Assert.assertEquals(2,mergeManager.onDiskMapOutputs.size()); List paths=new ArrayList(); Iterator iterator=mergeManager.onDiskMapOutputs.iterator(); List keys=new ArrayList(); List values=new ArrayList(); while (iterator.hasNext()) { CompressAwarePath next=iterator.next(); readOnDiskMapOutput(conf,fs,next,keys,values); paths.add(next); } Assert.assertEquals(keys,Arrays.asList("apple","banana","carrot","apple","banana","carrot")); Assert.assertEquals(values,Arrays.asList("awesome","bla","amazing","disgusting","pretty good","delicious")); mergeManager.close(); mergeManager=new MergeManagerImpl(reduceId2,jobConf,fs,lda,Reporter.NULL,null,null,null,null,null,null,null,new Progress(),new MROutputFiles()); MergeThread onDiskMerger=mergeManager.createOnDiskMerger(); onDiskMerger.merge(paths); Assert.assertEquals(1,mergeManager.onDiskMapOutputs.size()); keys=new ArrayList(); values=new ArrayList(); readOnDiskMapOutput(conf,fs,mergeManager.onDiskMapOutputs.iterator().next(),keys,values); Assert.assertEquals(keys,Arrays.asList("apple","apple","banana","banana","carrot","carrot")); Assert.assertEquals(values,Arrays.asList("awesome","disgusting","pretty good","bla","amazing","delicious")); mergeManager.close(); Assert.assertEquals(0,mergeManager.inMemoryMapOutputs.size()); Assert.assertEquals(0,mergeManager.inMemoryMergedMapOutputs.size()); Assert.assertEquals(0,mergeManager.onDiskMapOutputs.size()); }

    Class: org.apache.hadoop.mapreduce.task.reduce.TestShuffleScheduler

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that ShuffleSchedulerImpl#tipFailed(TaskID) advances reduce-side
     * progress and completion: with 2 map tasks configured, failing one tip
     * moves progress to 0.5 (waitUntilDone(1) still false), and failing the
     * second moves progress to 1.0 (waitUntilDone(1) true).  The TaskStatus
     * stub only needs getIsMap()/addFetchFailedMap, hence the anonymous class.
     */
    @SuppressWarnings("rawtypes") @Test public void testTipFailed() throws Exception { JobConf job=new JobConf(); job.setNumMapTasks(2); TaskStatus status=new TaskStatus(){ @Override public boolean getIsMap(){ return false; } @Override public void addFetchFailedMap( TaskAttemptID mapTaskId){ } } ; Progress progress=new Progress(); TaskAttemptID reduceId=new TaskAttemptID("314159",0,TaskType.REDUCE,0,0); ShuffleSchedulerImpl scheduler=new ShuffleSchedulerImpl(job,status,reduceId,null,progress,null,null,null); JobID jobId=new JobID(); TaskID taskId1=new TaskID(jobId,TaskType.REDUCE,1); scheduler.tipFailed(taskId1); Assert.assertEquals("Progress should be 0.5",0.5f,progress.getProgress(),0.0f); Assert.assertFalse(scheduler.waitUntilDone(1)); TaskID taskId0=new TaskID(jobId,TaskType.REDUCE,0); scheduler.tipFailed(taskId0); Assert.assertEquals("Progress should be 1.0",1.0f,progress.getProgress(),0.0f); Assert.assertTrue(scheduler.waitUntilDone(1)); }

    Class: org.apache.hadoop.mapreduce.tools.TestCLI

    InternalCallVerifier EqualityVerifier 
    /**
     * "-list-attempt-ids" must accept MAP/map/REDUCE as task types and
     * running/completed as task states, returning exit code 0 in each case,
     * and must request task reports for the matching task type twice.
     */
    @Test
    public void testListAttemptIdsWithValidInput() throws Exception {
      JobID jobId = JobID.forName(jobIdStr);
      Cluster mockCluster = mock(Cluster.class);
      Job job = mock(Job.class);
      CLI cli = spy(new CLI());

      // Wire the spied CLI to the mocked cluster and stub the task reports.
      doReturn(mockCluster).when(cli).createCluster();
      when(job.getTaskReports(TaskType.MAP))
          .thenReturn(getTaskReports(jobId, TaskType.MAP));
      when(job.getTaskReports(TaskType.REDUCE))
          .thenReturn(getTaskReports(jobId, TaskType.REDUCE));
      when(mockCluster.getJob(jobId)).thenReturn(job);

      int exitUpperMap = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "MAP", "running"});
      int exitLowerMap = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "map", "running"});
      int exitReduce = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "REDUCE", "running"});
      int exitCompleted = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "REDUCE", "completed"});

      assertEquals("MAP is a valid input,exit code should be 0", 0, exitUpperMap);
      assertEquals("map is a valid input,exit code should be 0", 0, exitLowerMap);
      assertEquals("REDUCE is a valid input,exit code should be 0", 0, exitReduce);
      assertEquals(
          "REDUCE and completed are a valid inputs to -list-attempt-ids,exit code should be 0",
          0, exitCompleted);

      verify(job, times(2)).getTaskReports(TaskType.MAP);
      verify(job, times(2)).getTaskReports(TaskType.REDUCE);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * "-list-attempt-ids" must reject setup/cleanup pseudo task types and
     * unknown task states, returning exit code -1 for each.
     */
    @Test
    public void testListAttemptIdsWithInvalidInputs() throws Exception {
      JobID jobId = JobID.forName(jobIdStr);
      Cluster mockCluster = mock(Cluster.class);
      Job job = mock(Job.class);
      CLI cli = spy(new CLI());

      doReturn(mockCluster).when(cli).createCluster();
      when(mockCluster.getJob(jobId)).thenReturn(job);

      int exitJobSetup = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "JOB_SETUP", "running"});
      int exitJobCleanup = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "JOB_CLEANUP", "running"});
      int exitBadState = cli.run(new String[] {"-list-attempt-ids", jobIdStr, "REDUCE", "complete"});

      assertEquals("JOB_SETUP is an invalid input,exit code should be -1", -1, exitJobSetup);
      assertEquals("JOB_CLEANUP is an invalid input,exit code should be -1", -1, exitJobCleanup);
      assertEquals("complete is an invalid input,exit code should be -1", -1, exitBadState);
    }

    Class: org.apache.hadoop.mapreduce.util.TestMRAsyncDiskService

    InternalCallVerifier EqualityVerifier 
    /**
     * relativeToWorking() must map the working directory itself to ".", a
     * direct child to its bare name, a nested child to a slash-joined relative
     * path, the parent directory to "..", and a sibling of the cwd to "../name".
     */
    @Test
    public void testRelativeToWorking() {
      assertEquals(".", relativeToWorking(System.getProperty("user.dir", ".")));

      String workingDir = System.getProperty("user.dir", ".");
      Path workingPath = new Path(workingDir);

      Path child = new Path(workingPath, "foo");
      assertEquals("foo", relativeToWorking(child.toUri().getPath()));

      Path grandchild = new Path(child, "bar");
      assertEquals("foo/bar", relativeToWorking(grandchild.toUri().getPath()));

      Path parentDir = new Path(workingPath, "..");
      assertEquals("..", relativeToWorking(parentDir.toUri().getPath()));

      Path sibling = new Path(parentDir, "baz");
      assertEquals("../baz", relativeToWorking(sibling.toUri().getPath()));
    }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * MRAsyncDiskService construction must tolerate a volume that exists but is
     * unwritable (mode 400) and come up using the remaining good volume rather
     * than failing outright.
     */
    @Test
    public void testToleratesSomeUnwritableVolumes() throws Throwable {
      FileSystem localFs = FileSystem.getLocal(new Configuration());
      String[] volumes = {TEST_ROOT_DIR + "/0", TEST_ROOT_DIR + "/1"};

      assertTrue(new File(volumes[0]).mkdirs());
      // Make the first volume read-only so the service cannot write to it.
      assertEquals(0, FileUtil.chmod(volumes[0], "400"));
      try {
        new MRAsyncDiskService(localFs, volumes);
      } finally {
        // Restore permissions so cleanup can remove the directory.
        FileUtil.chmod(volumes[0], "755");
      }
    }

    Class: org.apache.hadoop.mapreduce.v2.TestMRAMWithNonNormalizedCapabilities

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): the map memory is set via the current key
    // "mapreduce.map.memory.mb" while the reduce memory uses the deprecated
    // "mapred.reduce.memory.mb" -- presumably equivalent through Hadoop's
    // config deprecation mapping; confirm, and consider unifying on the new key.
    /** * To ensure nothing broken after we removed normalization * from the MRAM side * @throws Exception */ @Test public void testJobWithNonNormalizedCapabilities() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } JobConf jobConf=new JobConf(mrCluster.getConfig()); jobConf.setInt("mapreduce.map.memory.mb",700); jobConf.setInt("mapred.reduce.memory.mb",1500); SleepJob sleepJob=new SleepJob(); sleepJob.setConf(jobConf); Job job=sleepJob.createJob(3,2,1000,1,500,1); job.setJarByClass(SleepJob.class); job.addFileToClassPath(APP_JAR); job.submit(); boolean completed=job.waitForCompletion(true); Assert.assertTrue("Job should be completed",completed); Assert.assertEquals("Job should be finished successfully",JobStatus.State.SUCCEEDED,job.getJobState()); }

    Class: org.apache.hadoop.mapreduce.v2.TestMRJobs

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs a small sleep job on the mini YARN cluster (skipped when the MRApp
     * jar is absent) and verifies: successful completion, SUCCEEDED job state,
     * a tracking URL ending in the job-id suffix, and the expected sleep-job
     * counters and task progress via the shared verify helpers.
     */
    @Test(timeout=300000) public void testSleepJob() throws IOException, InterruptedException, ClassNotFoundException { LOG.info("\n\n\nStarting testSleepJob()."); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } Configuration sleepConf=new Configuration(mrCluster.getConfig()); sleepConf.set(MRConfig.MASTER_ADDRESS,"local"); SleepJob sleepJob=new SleepJob(); sleepJob.setConf(sleepConf); int numReduces=sleepConf.getInt("TestMRJobs.testSleepJob.reduces",2); Job job=sleepJob.createJob(3,numReduces,10000,1,5000,1); job.addFileToClassPath(APP_JAR); job.setJarByClass(SleepJob.class); job.setMaxMapAttempts(1); job.submit(); String trackingUrl=job.getTrackingURL(); String jobId=job.getJobID().toString(); boolean succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/")); verifySleepJobCounters(job); verifyTaskProgress(job); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs RandomTextWriterJob (3072 total bytes, 1024 per map => 3 maps) on
     * the mini cluster and verifies success, SUCCEEDED state, the tracking-URL
     * suffix, that exactly 3 part files were written (ignoring the _SUCCESS
     * marker), and the random-writer counters.
     */
    @Test(timeout=60000) public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException { LOG.info("\n\n\nStarting testRandomWriter()."); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } RandomTextWriterJob randomWriterJob=new RandomTextWriterJob(); mrCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES,"3072"); mrCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP,"1024"); Job job=randomWriterJob.createJob(mrCluster.getConfig()); Path outputDir=new Path(OUTPUT_ROOT_DIR,"random-output"); FileOutputFormat.setOutputPath(job,outputDir); job.setSpeculativeExecution(false); job.addFileToClassPath(APP_JAR); job.setJarByClass(RandomTextWriterJob.class); job.setMaxMapAttempts(1); job.submit(); String trackingUrl=job.getTrackingURL(); String jobId=job.getJobID().toString(); boolean succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID "+ jobId,trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/")); RemoteIterator iterator=FileContext.getFileContext(mrCluster.getConfig()).listStatus(outputDir); int count=0; while (iterator.hasNext()) { FileStatus file=iterator.next(); if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) { count++; } } Assert.assertEquals("Number of part files is wrong!",3,count); verifyRandomWriterCounters(job); }

    IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs a sleep job with tight task/AM log limits and log rotation enabled,
     * then inspects the NodeManager log directories to verify that both the AM
     * and the map task produced the configured number of rotated syslog files
     * (syslog, syslog.1, ...) and that the first backup reached the size cap.
     *
     * Fix: use assertEquals instead of assertSame when comparing the expected
     * and actual syslog file counts.  assertSame compares object identity; the
     * int operands are autoboxed to Integer, so the old check only passed via
     * the small-value Integer cache and would break for backup counts > 127.
     */
    @Test(timeout=120000)
    public void testContainerRollingLog()
        throws IOException, InterruptedException, ClassNotFoundException {
      if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test.");
        return;
      }

      final SleepJob sleepJob = new SleepJob();
      final JobConf sleepConf = new JobConf(mrCluster.getConfig());
      sleepConf.set(MRJobConfig.MAP_LOG_LEVEL, Level.ALL.toString());
      final long userLogKb = 4;
      sleepConf.setLong(MRJobConfig.TASK_USERLOG_LIMIT, userLogKb);
      sleepConf.setInt(MRJobConfig.TASK_LOG_BACKUPS, 3);
      sleepConf.set(MRJobConfig.MR_AM_LOG_LEVEL, Level.ALL.toString());
      final long amLogKb = 7;
      sleepConf.setLong(MRJobConfig.MR_AM_LOG_KB, amLogKb);
      sleepConf.setInt(MRJobConfig.MR_AM_LOG_BACKUPS, 7);
      sleepJob.setConf(sleepConf);

      final Job job = sleepJob.createJob(1, 0, 1L, 100, 0L, 0);
      job.setJarByClass(SleepJob.class);
      job.addFileToClassPath(APP_JAR);
      job.waitForCompletion(true);
      final JobId jobId = TypeConverter.toYarn(job.getJobID());
      final ApplicationId appID = jobId.getAppId();

      // Poll (up to 60s) for the RM application to reach a terminal state.
      int pollElapsed = 0;
      while (true) {
        Thread.sleep(1000);
        pollElapsed += 1000;
        if (TERMINAL_RM_APP_STATES.contains(
            mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) {
          break;
        }
        if (pollElapsed >= 60000) {
          LOG.warn("application did not reach terminal state within 60 seconds");
          break;
        }
      }
      Assert.assertEquals(RMAppState.FINISHED,
          mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState());

      // Glob that matches every container's syslog under this application.
      final String appIdStr = appID.toString();
      final String appIdSuffix = appIdStr.substring("application_".length(), appIdStr.length());
      final String containerGlob = "container_" + appIdSuffix + "_*_*";
      final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob
          + Path.SEPARATOR + TaskLog.LogName.SYSLOG;
      int numAppMasters = 0;
      int numMapTasks = 0;

      for (int i = 0; i < NUM_NODE_MGRS; i++) {
        final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
        for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
          final Path absSyslogGlob = new Path(logDir + Path.SEPARATOR + syslogGlob);
          LOG.info("Checking for glob: " + absSyslogGlob);
          final FileStatus[] syslogs = localFs.globStatus(absSyslogGlob);
          for (FileStatus slog : syslogs) {
            boolean foundAppMaster = job.isUber();
            final Path containerPathComponent = slog.getPath().getParent();
            if (!foundAppMaster) {
              // In non-uber mode the AM runs in the container with id 1.
              final ContainerId cid =
                  ConverterUtils.toContainerId(containerPathComponent.getName());
              foundAppMaster = (cid.getId() == 1);
            }

            final FileStatus[] sysSiblings = localFs.globStatus(
                new Path(containerPathComponent, TaskLog.LogName.SYSLOG + "*"));
            // Ensure deterministic order: syslog, syslog.1, syslog.2, ...
            Arrays.sort(sysSiblings);

            if (foundAppMaster) {
              numAppMasters++;
            } else {
              numMapTasks++;
            }

            if (foundAppMaster) {
              Assert.assertEquals("Unexpected number of AM sylog* files",
                  sleepConf.getInt(MRJobConfig.MR_AM_LOG_BACKUPS, 0) + 1,
                  sysSiblings.length);
              Assert.assertTrue("AM syslog.1 length kb should be >= " + amLogKb,
                  sysSiblings[1].getLen() >= amLogKb * 1024);
            } else {
              Assert.assertEquals("Unexpected number of MR task sylog* files",
                  sleepConf.getInt(MRJobConfig.TASK_LOG_BACKUPS, 0) + 1,
                  sysSiblings.length);
              Assert.assertTrue("MR syslog.1 length kb should be >= " + userLogKb,
                  sysSiblings[1].getLen() >= userLogKb * 1024);
            }
          }
        }
      }

      // Exactly one AM log; exactly one map-task log unless the job ran uberized.
      Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
      if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
        Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
      } else {
        Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Runs the failing-mapper job, prints diagnostics for the first two task
     * attempts, and verifies the completion events: attempt 0 FAILED, then the
     * tip reported TIPFAILED, with the overall job state FAILED, plus the
     * failing-mapper counters.
     */
    @Test(timeout=60000) public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException { LOG.info("\n\n\nStarting testFailingMapper()."); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } Job job=runFailingMapperJob(); TaskID taskID=new TaskID(job.getJobID(),TaskType.MAP,0); TaskAttemptID aId=new TaskAttemptID(taskID,0); System.out.println("Diagnostics for " + aId + " :"); for ( String diag : job.getTaskDiagnostics(aId)) { System.out.println(diag); } aId=new TaskAttemptID(taskID,1); System.out.println("Diagnostics for " + aId + " :"); for ( String diag : job.getTaskDiagnostics(aId)) { System.out.println(diag); } TaskCompletionEvent[] events=job.getTaskCompletionEvents(0,2); Assert.assertEquals(TaskCompletionEvent.Status.FAILED,events[0].getStatus()); Assert.assertEquals(TaskCompletionEvent.Status.TIPFAILED,events[1].getStatus()); Assert.assertEquals(JobStatus.State.FAILED,job.getJobState()); verifyFailingMapperCounters(job); }

    Class: org.apache.hadoop.mapreduce.v2.TestMRJobsWithHistoryService

    InternalCallVerifier EqualityVerifier 
    /**
     * Runs a sleep job, waits (up to 60s) for the RM app to reach a terminal
     * state, then verifies the app FINISHED, that counters fetched after
     * completion match those captured at completion time, and that the history
     * server returns a job report for the job id via HSClientProtocol.
     */
    @Test(timeout=90000) public void testJobHistoryData() throws IOException, InterruptedException, AvroRemoteException, ClassNotFoundException { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } SleepJob sleepJob=new SleepJob(); sleepJob.setConf(mrCluster.getConfig()); Job job=sleepJob.createJob(3,2,1000,1,500,1); job.setJarByClass(SleepJob.class); job.addFileToClassPath(APP_JAR); job.waitForCompletion(true); Counters counterMR=job.getCounters(); JobId jobId=TypeConverter.toYarn(job.getJobID()); ApplicationId appID=jobId.getAppId(); int pollElapsed=0; while (true) { Thread.sleep(1000); pollElapsed+=1000; if (TERMINAL_RM_APP_STATES.contains(mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState())) { break; } if (pollElapsed >= 60000) { LOG.warn("application did not reach terminal state within 60 seconds"); break; } } Assert.assertEquals(RMAppState.FINISHED,mrCluster.getResourceManager().getRMContext().getRMApps().get(appID).getState()); Counters counterHS=job.getCounters(); LOG.info("CounterHS " + counterHS); LOG.info("CounterMR " + counterMR); Assert.assertEquals(counterHS,counterMR); HSClientProtocol historyClient=instantiateHistoryProxy(); GetJobReportRequest gjReq=Records.newRecord(GetJobReportRequest.class); gjReq.setJobId(jobId); JobReport jobReport=historyClient.getJobReport(gjReq).getJobReport(); verifyJobReport(jobReport,jobId); }

    Class: org.apache.hadoop.mapreduce.v2.TestRMNMInfo

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the RMNMInfo bean against the live mini cluster: the live-node
     * JSON must list every node manager, each in RUNNING state, with all
     * expected fields present and zero used containers/memory on an idle
     * cluster.
     */
    @Test public void testRMNMInfo() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } RMContext rmc=mrCluster.getResourceManager().getRMContext(); ResourceScheduler rms=mrCluster.getResourceManager().getResourceScheduler(); RMNMInfo rmInfo=new RMNMInfo(rmc,rms); String liveNMs=rmInfo.getLiveNodeManagers(); ObjectMapper mapper=new ObjectMapper(); JsonNode jn=mapper.readTree(liveNMs); Assert.assertEquals("Unexpected number of live nodes:",NUMNODEMANAGERS,jn.size()); Iterator it=jn.iterator(); while (it.hasNext()) { JsonNode n=it.next(); Assert.assertNotNull(n.get("HostName")); Assert.assertNotNull(n.get("Rack")); Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING")); Assert.assertNotNull(n.get("NodeHTTPAddress")); Assert.assertNotNull(n.get("LastHealthUpdate")); Assert.assertNotNull(n.get("HealthReport")); Assert.assertNotNull(n.get("NodeManagerVersion")); Assert.assertNotNull(n.get("NumContainers")); Assert.assertEquals(n.get("NodeId") + ": Unexpected number of used containers",0,n.get("NumContainers").asInt()); Assert.assertEquals(n.get("NodeId") + ": Unexpected amount of used memory",0,n.get("UsedMemoryMB").asInt()); Assert.assertNotNull(n.get("AvailableMemoryMB")); } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises RMNMInfo with a mocked RMContext node that the (mock) scheduler
     * does not know about: the node still appears with its basic fields, but
     * the scheduler-derived fields (NumContainers, UsedMemoryMB,
     * AvailableMemoryMB) must be absent from the JSON.
     */
    @Test public void testRMNMInfoMissmatch() throws Exception { RMContext rmc=mock(RMContext.class); ResourceScheduler rms=mock(ResourceScheduler.class); ConcurrentMap map=new ConcurrentHashMap(); RMNode node=MockNodes.newNodeInfo(1,MockNodes.newResource(4 * 1024)); map.put(node.getNodeID(),node); when(rmc.getRMNodes()).thenReturn(map); RMNMInfo rmInfo=new RMNMInfo(rmc,rms); String liveNMs=rmInfo.getLiveNodeManagers(); ObjectMapper mapper=new ObjectMapper(); JsonNode jn=mapper.readTree(liveNMs); Assert.assertEquals("Unexpected number of live nodes:",1,jn.size()); Iterator it=jn.iterator(); while (it.hasNext()) { JsonNode n=it.next(); Assert.assertNotNull(n.get("HostName")); Assert.assertNotNull(n.get("Rack")); Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",n.get("State").asText().contains("RUNNING")); Assert.assertNotNull(n.get("NodeHTTPAddress")); Assert.assertNotNull(n.get("LastHealthUpdate")); Assert.assertNotNull(n.get("HealthReport")); Assert.assertNotNull(n.get("NodeManagerVersion")); Assert.assertNull(n.get("NumContainers")); Assert.assertNull(n.get("UsedMemoryMB")); Assert.assertNull(n.get("AvailableMemoryMB")); } }

    Class: org.apache.hadoop.mapreduce.v2.TestRecordFactory

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * The protobuf record factory must return the PB implementation class for
     * requested record interfaces (CounterGroup and GetCountersRequest here);
     * a YarnRuntimeException from the factory is a test failure.
     *
     * Fix: corrected the typo "crete" -> "create" in both failure messages.
     */
    @Test
    public void testPbRecordFactory() {
      RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
      try {
        CounterGroup response = pbRecordFactory.newRecordInstance(CounterGroup.class);
        Assert.assertEquals(CounterGroupPBImpl.class, response.getClass());
      } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to create record");
      }
      try {
        GetCountersRequest response = pbRecordFactory.newRecordInstance(GetCountersRequest.class);
        Assert.assertEquals(GetCountersRequestPBImpl.class, response.getClass());
      } catch (YarnRuntimeException e) {
        e.printStackTrace();
        Assert.fail("Failed to create record");
      }
    }

    Class: org.apache.hadoop.mapreduce.v2.TestSpeculativeExecution

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs the speculation job three times (skipped if the MRApp jar is
     * absent): a baseline with no slow tasks (2 maps + 2 reduces launched, no
     * failures), then with a slow map (3 maps launched, 1 killed), then with a
     * slow reduce (3 reduces launched).  Each run must succeed.
     */
    @Test public void testSpeculativeExecution() throws Exception { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } Job job=runSpecTest(false,false); boolean succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); Counters counters=job.getCounters(); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); Assert.assertEquals(0,counters.findCounter(JobCounter.NUM_FAILED_MAPS).getValue()); job=runSpecTest(true,false); succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); counters=job.getCounters(); Assert.assertEquals(3,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); Assert.assertEquals(0,counters.findCounter(JobCounter.NUM_FAILED_MAPS).getValue()); Assert.assertEquals(1,counters.findCounter(JobCounter.NUM_KILLED_MAPS).getValue()); job=runSpecTest(false,true); succeeded=job.waitForCompletion(true); Assert.assertTrue(succeeded); Assert.assertEquals(JobStatus.State.SUCCEEDED,job.getJobState()); counters=job.getCounters(); Assert.assertEquals(2,counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); Assert.assertEquals(3,counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue()); }

    Class: org.apache.hadoop.mapreduce.v2.TestSpeculativeExecutionWithMRApp

    InternalCallVerifier EqualityVerifier 
    /**
     * Drives an MRApp with a controlled clock: every attempt reports progress,
     * all tasks but one are completed, and the laggard (which keeps sending
     * status updates but never finishes) must be speculated -- the waitFor loop
     * polls, advancing the clock, until the laggard has 2 attempts.  The first
     * attempt is then made to win and the speculation message is verified.
     * NOTE(review): the method name's "Sepculate" typo is pre-existing;
     * renaming would change the public test name, so it is left as-is.
     */
    @Test public void testSepculateSuccessfulWithUpdateEvents() throws Exception { Clock actualClock=new SystemClock(); final ControlledClock clock=new ControlledClock(actualClock); clock.setTime(System.currentTimeMillis()); MRApp app=new MRApp(NUM_MAPPERS,NUM_REDUCERS,false,"test",true,clock); Job job=app.submit(new Configuration(),true,true); app.waitForState(job,JobState.RUNNING); Map tasks=job.getTasks(); Assert.assertEquals("Num tasks is not correct",NUM_MAPPERS + NUM_REDUCERS,tasks.size()); Iterator taskIter=tasks.values().iterator(); while (taskIter.hasNext()) { app.waitForState(taskIter.next(),TaskState.RUNNING); } clock.setTime(System.currentTimeMillis() + 1000); EventHandler appEventHandler=app.getContext().getEventHandler(); for ( Map.Entry mapTask : tasks.entrySet()) { for ( Map.Entry taskAttempt : mapTask.getValue().getAttempts().entrySet()) { TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.5,TaskAttemptState.RUNNING); TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status); appEventHandler.handle(event); } } Task speculatedTask=null; int numTasksToFinish=NUM_MAPPERS + NUM_REDUCERS - 1; clock.setTime(System.currentTimeMillis() + 1000); for ( Map.Entry task : tasks.entrySet()) { for ( Map.Entry taskAttempt : task.getValue().getAttempts().entrySet()) { if (numTasksToFinish > 0) { appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_DONE)); appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_CONTAINER_CLEANED)); numTasksToFinish--; app.waitForState(taskAttempt.getValue(),TaskAttemptState.SUCCEEDED); } else { TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.75,TaskAttemptState.RUNNING); speculatedTask=task.getValue(); TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status); appEventHandler.handle(event); } } } 
clock.setTime(System.currentTimeMillis() + 15000); for ( Map.Entry task : tasks.entrySet()) { for ( Map.Entry taskAttempt : task.getValue().getAttempts().entrySet()) { if (taskAttempt.getValue().getState() != TaskAttemptState.SUCCEEDED) { TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.75,TaskAttemptState.RUNNING); TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status); appEventHandler.handle(event); } } } final Task speculatedTaskConst=speculatedTask; GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ if (speculatedTaskConst.getAttempts().size() != 2) { clock.setTime(System.currentTimeMillis() + 1000); return false; } else { return true; } } } ,1000,60000); TaskAttempt[] ta=makeFirstAttemptWin(appEventHandler,speculatedTask); verifySpeculationMessage(app,ta); app.waitForState(Service.STATE.STOPPED); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Speculation without further status updates: every attempt reports 0.8
     * progress once, then a randomly chosen task goes silent while all other
     * tasks are completed.  The speculator must still launch a second attempt
     * for the silent task -- the waitFor loop polls (advancing the controlled
     * clock) until it has 2 attempts -- after which the first attempt is made
     * to win and the speculation message is verified.
     */
    @Test public void testSpeculateSuccessfulWithoutUpdateEvents() throws Exception { Clock actualClock=new SystemClock(); final ControlledClock clock=new ControlledClock(actualClock); clock.setTime(System.currentTimeMillis()); MRApp app=new MRApp(NUM_MAPPERS,NUM_REDUCERS,false,"test",true,clock); Job job=app.submit(new Configuration(),true,true); app.waitForState(job,JobState.RUNNING); Map tasks=job.getTasks(); Assert.assertEquals("Num tasks is not correct",NUM_MAPPERS + NUM_REDUCERS,tasks.size()); Iterator taskIter=tasks.values().iterator(); while (taskIter.hasNext()) { app.waitForState(taskIter.next(),TaskState.RUNNING); } clock.setTime(System.currentTimeMillis() + 2000); EventHandler appEventHandler=app.getContext().getEventHandler(); for ( Map.Entry mapTask : tasks.entrySet()) { for ( Map.Entry taskAttempt : mapTask.getValue().getAttempts().entrySet()) { TaskAttemptStatus status=createTaskAttemptStatus(taskAttempt.getKey(),(float)0.8,TaskAttemptState.RUNNING); TaskAttemptStatusUpdateEvent event=new TaskAttemptStatusUpdateEvent(taskAttempt.getKey(),status); appEventHandler.handle(event); } } Random generator=new Random(); Object[] taskValues=tasks.values().toArray(); final Task taskToBeSpeculated=(Task)taskValues[generator.nextInt(taskValues.length)]; for ( Map.Entry mapTask : tasks.entrySet()) { for ( Map.Entry taskAttempt : mapTask.getValue().getAttempts().entrySet()) { if (mapTask.getKey() != taskToBeSpeculated.getID()) { appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_DONE)); appEventHandler.handle(new TaskAttemptEvent(taskAttempt.getKey(),TaskAttemptEventType.TA_CONTAINER_CLEANED)); app.waitForState(taskAttempt.getValue(),TaskAttemptState.SUCCEEDED); } } } GenericTestUtils.waitFor(new Supplier(){ @Override public Boolean get(){ if (taskToBeSpeculated.getAttempts().size() != 2) { clock.setTime(System.currentTimeMillis() + 1000); return false; } else { return true; } } } ,1000,60000); TaskAttempt[] 
ta=makeFirstAttemptWin(appEventHandler,taskToBeSpeculated); verifySpeculationMessage(app,ta); app.waitForState(Service.STATE.STOPPED); }

    Class: org.apache.hadoop.mapreduce.v2.TestUberAM

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Uberized variant of testFailingMapper: with a single uber attempt there
     * is no second task attempt, so fetching diagnostics for attempt 1 must
     * throw; exactly one completion event is emitted (FAILED or TIPFAILED) and
     * the job state is FAILED.
     */
    @Override @Test public void testFailingMapper() throws IOException, InterruptedException, ClassNotFoundException { LOG.info("\n\n\nStarting uberized testFailingMapper()."); if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR + " not found. Not running test."); return; } Job job=runFailingMapperJob(); TaskID taskID=new TaskID(job.getJobID(),TaskType.MAP,0); TaskAttemptID aId=new TaskAttemptID(taskID,0); System.out.println("Diagnostics for " + aId + " :"); for ( String diag : job.getTaskDiagnostics(aId)) { System.out.println(diag); } boolean secondTaskAttemptExists=true; try { aId=new TaskAttemptID(taskID,1); System.out.println("Diagnostics for " + aId + " :"); for ( String diag : job.getTaskDiagnostics(aId)) { System.out.println(diag); } } catch ( Exception e) { secondTaskAttemptExists=false; } Assert.assertEquals(false,secondTaskAttemptExists); TaskCompletionEvent[] events=job.getTaskCompletionEvents(0,2); Assert.assertEquals(1,events.length); TaskCompletionEvent.Status status=events[0].getStatus(); Assert.assertTrue(status == TaskCompletionEvent.Status.FAILED || status == TaskCompletionEvent.Status.TIPFAILED); Assert.assertEquals(JobStatus.State.FAILED,job.getJobState()); }

    Class: org.apache.hadoop.mapreduce.v2.api.records.TestIds

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks TaskAttemptId's equals/compareTo/hashCode behavior across ids that
     * differ in task type, attempt number, and task id (ordering observed here:
     * type, then task id, then attempt), and the zero-padded vs. unpadded
     * toString renderings.
     */
    @Test public void testTaskAttemptId(){ long ts1=1315890136000l; long ts2=1315890136001l; TaskAttemptId t1=createTaskAttemptId(ts1,2,2,TaskType.MAP,2); TaskAttemptId t2=createTaskAttemptId(ts1,2,2,TaskType.REDUCE,2); TaskAttemptId t3=createTaskAttemptId(ts1,2,2,TaskType.MAP,3); TaskAttemptId t4=createTaskAttemptId(ts1,2,2,TaskType.MAP,1); TaskAttemptId t5=createTaskAttemptId(ts1,2,1,TaskType.MAP,3); TaskAttemptId t6=createTaskAttemptId(ts1,2,2,TaskType.MAP,2); assertTrue(t1.equals(t6)); assertFalse(t1.equals(t2)); assertFalse(t1.equals(t3)); assertFalse(t1.equals(t5)); assertTrue(t1.compareTo(t6) == 0); assertTrue(t1.compareTo(t2) < 0); assertTrue(t1.compareTo(t3) < 0); assertTrue(t1.compareTo(t4) > 0); assertTrue(t1.compareTo(t5) > 0); assertTrue(t1.hashCode() == t6.hashCode()); assertFalse(t1.hashCode() == t2.hashCode()); assertFalse(t1.hashCode() == t3.hashCode()); assertFalse(t1.hashCode() == t5.hashCode()); TaskAttemptId t7=createTaskAttemptId(ts2,5463346,4326575,TaskType.REDUCE,54375); assertEquals("attempt_" + ts1 + "_0002_m_000002_2",t1.toString()); assertEquals("attempt_" + ts2 + "_5463346_r_4326575_54375",t7.toString()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks TaskId's equals/compareTo/hashCode behavior across ids differing
     * in task type, id, and cluster timestamp (REDUCE sorts after MAP here),
     * plus the zero-padded and unpadded toString renderings.
     */
    @Test public void testTaskId(){ long ts1=1315890136000l; long ts2=1315890136001l; TaskId t1=createTaskId(ts1,1,2,TaskType.MAP); TaskId t2=createTaskId(ts1,1,2,TaskType.REDUCE); TaskId t3=createTaskId(ts1,1,1,TaskType.MAP); TaskId t4=createTaskId(ts1,1,2,TaskType.MAP); TaskId t5=createTaskId(ts2,1,1,TaskType.MAP); assertTrue(t1.equals(t4)); assertFalse(t1.equals(t2)); assertFalse(t1.equals(t3)); assertFalse(t1.equals(t5)); assertTrue(t1.compareTo(t4) == 0); assertTrue(t1.compareTo(t2) < 0); assertTrue(t1.compareTo(t3) > 0); assertTrue(t1.compareTo(t5) < 0); assertTrue(t1.hashCode() == t4.hashCode()); assertFalse(t1.hashCode() == t2.hashCode()); assertFalse(t1.hashCode() == t3.hashCode()); assertFalse(t1.hashCode() == t5.hashCode()); TaskId t6=createTaskId(ts1,324151,54643747,TaskType.REDUCE); assertEquals("task_" + ts1 + "_0001_m_000002",t1.toString()); assertEquals("task_" + ts1 + "_324151_r_54643747",t6.toString()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks JobId's equals/compareTo/hashCode behavior for ids that differ in
     * sequence number and cluster timestamp, and the zero-padded vs. unpadded
     * toString renderings.
     */
    @Test
    public void testJobId() {
      long ts1 = 1315890136000l;
      long ts2 = 1315890136001l;

      JobId first = createJobId(ts1, 2);
      JobId lowerSeq = createJobId(ts1, 1);
      JobId laterTs = createJobId(ts2, 1);
      JobId sameAsFirst = createJobId(ts1, 2);

      // equals: true only for identical (timestamp, sequence) pairs
      assertTrue(first.equals(sameAsFirst));
      assertFalse(first.equals(lowerSeq));
      assertFalse(first.equals(laterTs));

      // compareTo: ordered by timestamp, then sequence number
      assertTrue(first.compareTo(sameAsFirst) == 0);
      assertTrue(first.compareTo(lowerSeq) > 0);
      assertTrue(first.compareTo(laterTs) < 0);

      // hash codes agree for equal ids and differ for these unequal ones
      assertTrue(first.hashCode() == sameAsFirst.hashCode());
      assertFalse(first.hashCode() == lowerSeq.hashCode());
      assertFalse(first.hashCode() == laterTs.hashCode());

      // small sequence numbers are zero-padded to 4 digits; large ones are not
      JobId bigSeq = createJobId(ts1, 231415);
      assertEquals("job_" + ts1 + "_0002", first.toString());
      assertEquals("job_" + ts1 + "_231415", bigSeq.toString());
    }

    Class: org.apache.hadoop.mapreduce.v2.app.TestAMInfos

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * With AM recovery disabled, a restarted AM does not recover prior task
     * state but must still expose both AM generations: after the second app
     * start there are two AMInfos and the first retains the original AM's
     * start time.
     */
    @Test public void testAMInfosWithoutRecoveryEnabled() throws Exception { int runCount=0; MRApp app=new MRAppWithHistory(1,0,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); long am1StartTime=app.getAllAMInfos().get(0).getStartTime(); Assert.assertEquals("No of tasks not correct",1,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask=it.next(); app.waitForState(mapTask,TaskState.RUNNING); TaskAttempt taskAttempt=mapTask.getAttempts().values().iterator().next(); app.waitForState(taskAttempt,TaskAttemptState.RUNNING); app.stop(); app=new MRAppWithHistory(1,0,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,false); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",1,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask=it.next(); List amInfos=app.getAllAMInfos(); Assert.assertEquals(2,amInfos.size()); AMInfo amInfoOne=amInfos.get(0); Assert.assertEquals(am1StartTime,amInfoOne.getStartTime()); app.stop(); }

    Class: org.apache.hadoop.mapreduce.v2.app.TestFail

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * An attempt whose container completes while the attempt is still only
     * ASSIGNED (never launched) must count against the attempt limit: with
     * maxAttempts 1, the TA_CONTAINER_COMPLETED event drives the single
     * attempt -- and hence the job -- to FAILED.
     */
    @Test public void testTaskFailWithUnusedContainer() throws Exception { MRApp app=new MRAppWithFailingTaskAndUnusedContainer(); Configuration conf=new Configuration(); int maxAttempts=1; conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Map tasks=job.getTasks(); Assert.assertEquals("Num tasks is not correct",1,tasks.size()); Task task=tasks.values().iterator().next(); app.waitForState(task,TaskState.SCHEDULED); Map attempts=tasks.values().iterator().next().getAttempts(); Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size()); TaskAttempt attempt=attempts.values().iterator().next(); app.waitForInternalState((TaskAttemptImpl)attempt,TaskAttemptStateInternal.ASSIGNED); app.getDispatcher().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(),TaskAttemptEventType.TA_CONTAINER_COMPLETED)); app.waitForState(job,JobState.FAILED); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Uses an app whose first task attempt fails; verifies the job and task still
    // SUCCEED via a second attempt, and that the two attempts end FAILED then SUCCEEDED
    // (in iteration order of the attempts map).
    @Test public void testFailTask() throws Exception { MRApp app=new MockFirstFailingAttemptMRApp(1,0); Configuration conf=new Configuration(); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.SUCCEEDED); Map tasks=job.getTasks(); Assert.assertEquals("Num tasks is not correct",1,tasks.size()); Task task=tasks.values().iterator().next(); Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task.getReport().getTaskState()); Map attempts=tasks.values().iterator().next().getAttempts(); Assert.assertEquals("Num attempts is not correct",2,attempts.size()); Iterator it=attempts.values().iterator(); Assert.assertEquals("Attempt state not correct",TaskAttemptState.FAILED,it.next().getReport().getTaskAttemptState()); Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,it.next().getReport().getTaskAttemptState()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // With a task that times out and MAP_MAX_ATTEMPTS=2, verifies the job ends FAILED,
    // the task ends FAILED, and every attempt (maxAttempts of them) ends FAILED.
    @Test public void testTimedOutTask() throws Exception { MRApp app=new TimeOutTaskMRApp(1,0); Configuration conf=new Configuration(); int maxAttempts=2; conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.FAILED); Map tasks=job.getTasks(); Assert.assertEquals("Num tasks is not correct",1,tasks.size()); Task task=tasks.values().iterator().next(); Assert.assertEquals("Task state not correct",TaskState.FAILED,task.getReport().getTaskState()); Map attempts=tasks.values().iterator().next().getAttempts(); Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size()); for ( TaskAttempt attempt : attempts.values()) { Assert.assertEquals("Attempt state not correct",TaskAttemptState.FAILED,attempt.getReport().getTaskAttemptState()); } }

    Class: org.apache.hadoop.mapreduce.v2.app.TestFetchFailure

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // 1 map, 3 reduces. Two fetch failures from a single reduce do NOT fail the map
    // (it stays SUCCEEDED); after the other reduces move to REDUCE phase, a third
    // fetch failure does fail mapAttempt1, the map re-runs, the original SUCCEEDED
    // completion event becomes OBSOLETE, and the final event list / map-event slices
    // (getMapAttemptCompletionEvents vs TypeConverter.fromYarn) are checked.
    // NOTE(review): the ordering of the three updateStatus/sendFetchFailure phases is
    // load-bearing — do not reorder.
    @Test public void testFetchFailureMultipleReduces() throws Exception { MRApp app=new MRApp(1,3,false,this.getClass().getName(),true); Configuration conf=new Configuration(); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",4,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask=it.next(); Task reduceTask=it.next(); Task reduceTask2=it.next(); Task reduceTask3=it.next(); app.waitForState(mapTask,TaskState.RUNNING); TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next(); app.waitForState(mapAttempt1,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask,TaskState.SUCCEEDED); TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Num completion events not correct",1,events.length); Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus()); app.waitForState(reduceTask,TaskState.RUNNING); app.waitForState(reduceTask2,TaskState.RUNNING); app.waitForState(reduceTask3,TaskState.RUNNING); TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next(); app.waitForState(reduceAttempt,TaskAttemptState.RUNNING); updateStatus(app,reduceAttempt,Phase.SHUFFLE); TaskAttempt reduceAttempt2=reduceTask2.getAttempts().values().iterator().next(); app.waitForState(reduceAttempt2,TaskAttemptState.RUNNING); updateStatus(app,reduceAttempt2,Phase.SHUFFLE); TaskAttempt reduceAttempt3=reduceTask3.getAttempts().values().iterator().next(); app.waitForState(reduceAttempt3,TaskAttemptState.RUNNING); updateStatus(app,reduceAttempt3,Phase.SHUFFLE); sendFetchFailure(app,reduceAttempt,mapAttempt1); sendFetchFailure(app,reduceAttempt,mapAttempt1); assertEquals(TaskState.SUCCEEDED,mapTask.getState()); 
updateStatus(app,reduceAttempt2,Phase.REDUCE); updateStatus(app,reduceAttempt3,Phase.REDUCE); sendFetchFailure(app,reduceAttempt,mapAttempt1); app.waitForState(mapTask,TaskState.RUNNING); Assert.assertEquals("Map TaskAttempt state not correct",TaskAttemptState.FAILED,mapAttempt1.getState()); Assert.assertEquals("Num attempts in Map Task not correct",2,mapTask.getAttempts().size()); Iterator atIt=mapTask.getAttempts().values().iterator(); atIt.next(); TaskAttempt mapAttempt2=atIt.next(); app.waitForState(mapAttempt2,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt2.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask,TaskState.SUCCEEDED); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE)); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt2.getID(),TaskAttemptEventType.TA_DONE)); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt3.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(job,JobState.SUCCEEDED); Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus()); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Num completion events not correct",6,events.length); Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[0].getAttemptId()); Assert.assertEquals("Event map attempt id not correct",mapAttempt1.getID(),events[1].getAttemptId()); Assert.assertEquals("Event map attempt id not correct",mapAttempt2.getID(),events[2].getAttemptId()); Assert.assertEquals("Event reduce attempt id not correct",reduceAttempt.getID(),events[3].getAttemptId()); Assert.assertEquals("Event status not correct for map attempt1",TaskAttemptCompletionEventStatus.OBSOLETE,events[0].getStatus()); Assert.assertEquals("Event status not correct for map 
attempt1",TaskAttemptCompletionEventStatus.FAILED,events[1].getStatus()); Assert.assertEquals("Event status not correct for map attempt2",TaskAttemptCompletionEventStatus.SUCCEEDED,events[2].getStatus()); Assert.assertEquals("Event status not correct for reduce attempt1",TaskAttemptCompletionEventStatus.SUCCEEDED,events[3].getStatus()); TaskCompletionEvent mapEvents[]=job.getMapAttemptCompletionEvents(0,2); TaskCompletionEvent convertedEvents[]=TypeConverter.fromYarn(events); Assert.assertEquals("Incorrect number of map events",2,mapEvents.length); Assert.assertArrayEquals("Unexpected map events",Arrays.copyOfRange(convertedEvents,0,2),mapEvents); mapEvents=job.getMapAttemptCompletionEvents(2,200); Assert.assertEquals("Incorrect number of map events",1,mapEvents.length); Assert.assertEquals("Unexpected map event",convertedEvents[2],mapEvents[0]); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * 1 map, 1 reduce. Three fetch failures reported by the reduce against the
     * completed map attempt must fail that attempt and re-run the map; the original
     * SUCCEEDED completion event becomes OBSOLETE. Finally verifies the four
     * completion events (obsolete map, failed map, succeeded map re-run, succeeded
     * reduce) and that getMapAttemptCompletionEvents agrees with
     * TypeConverter.fromYarn over both paged slices.
     *
     * Fix: corrected assertion-message typo "redude" -> "reduce".
     */
    @Test public void testFetchFailure() throws Exception {
      MRApp app = new MRApp(1, 1, false, this.getClass().getName(), true);
      Configuration conf = new Configuration();
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      Job job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("Num tasks not correct", 2, job.getTasks().size());
      Iterator it = job.getTasks().values().iterator();
      Task mapTask = it.next();
      Task reduceTask = it.next();
      // Complete the map attempt; one SUCCEEDED completion event is published.
      app.waitForState(mapTask, TaskState.RUNNING);
      TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
      app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapAttempt1.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask, TaskState.SUCCEEDED);
      TaskAttemptCompletionEvent[] events = job.getTaskAttemptCompletionEvents(0, 100);
      Assert.assertEquals("Num completion events not correct", 1, events.length);
      Assert.assertEquals("Event status not correct",
          TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
      // Three fetch failures from the reduce trip the failure threshold for the map.
      app.waitForState(reduceTask, TaskState.RUNNING);
      TaskAttempt reduceAttempt = reduceTask.getAttempts().values().iterator().next();
      app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
      sendFetchFailure(app, reduceAttempt, mapAttempt1);
      sendFetchFailure(app, reduceAttempt, mapAttempt1);
      sendFetchFailure(app, reduceAttempt, mapAttempt1);
      // The map re-runs with a second attempt.
      app.waitForState(mapTask, TaskState.RUNNING);
      Assert.assertEquals("Map TaskAttempt state not correct",
          TaskAttemptState.FAILED, mapAttempt1.getState());
      Assert.assertEquals("Num attempts in Map Task not correct",
          2, mapTask.getAttempts().size());
      Iterator atIt = mapTask.getAttempts().values().iterator();
      atIt.next();
      TaskAttempt mapAttempt2 = atIt.next();
      app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapAttempt2.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask, TaskState.SUCCEEDED);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(reduceAttempt.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(job, JobState.SUCCEEDED);
      // The previously-fetched event object now reports OBSOLETE.
      Assert.assertEquals("Event status not correct",
          TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
      events = job.getTaskAttemptCompletionEvents(0, 100);
      Assert.assertEquals("Num completion events not correct", 4, events.length);
      Assert.assertEquals("Event map attempt id not correct",
          mapAttempt1.getID(), events[0].getAttemptId());
      Assert.assertEquals("Event map attempt id not correct",
          mapAttempt1.getID(), events[1].getAttemptId());
      Assert.assertEquals("Event map attempt id not correct",
          mapAttempt2.getID(), events[2].getAttemptId());
      Assert.assertEquals("Event reduce attempt id not correct",
          reduceAttempt.getID(), events[3].getAttemptId());
      Assert.assertEquals("Event status not correct for map attempt1",
          TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
      Assert.assertEquals("Event status not correct for map attempt1",
          TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
      Assert.assertEquals("Event status not correct for map attempt2",
          TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
      Assert.assertEquals("Event status not correct for reduce attempt1",
          TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
      // Map-only event view must match the converted full event list, page by page.
      TaskCompletionEvent mapEvents[] = job.getMapAttemptCompletionEvents(0, 2);
      TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
      Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
      Assert.assertArrayEquals("Unexpected map events",
          Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
      mapEvents = job.getMapAttemptCompletionEvents(2, 200);
      Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
      Assert.assertEquals("Unexpected map event", convertedEvents[2], mapEvents[0]);
    }

    InternalCallVerifier EqualityVerifier 
    // (Existing javadoc below describes the scenario.) First AM: map succeeds, then
    // three fetch failures force the map back to RUNNING; AM stops. Second AM with
    // recovery enabled re-runs the map from scratch and the job completes with exactly
    // 2 completion events (the failed first attempt is not remembered).
    /** * This tests that if a map attempt was failed (say due to fetch failures), * then it gets re-run. When the next map attempt is running, if the AM dies, * then, on AM re-run, the AM does not incorrectly remember the first failed * attempt. Currently recovery does not recover running tasks. Effectively, * the AM re-runs the maps from scratch. */ @Test public void testFetchFailureWithRecovery() throws Exception { int runCount=0; MRApp app=new MRAppWithHistory(1,1,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",2,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask=it.next(); Task reduceTask=it.next(); app.waitForState(mapTask,TaskState.RUNNING); TaskAttempt mapAttempt1=mapTask.getAttempts().values().iterator().next(); app.waitForState(mapAttempt1,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask,TaskState.SUCCEEDED); TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Num completion events not correct",1,events.length); Assert.assertEquals("Event status not correct",TaskAttemptCompletionEventStatus.SUCCEEDED,events[0].getStatus()); app.waitForState(reduceTask,TaskState.RUNNING); TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next(); app.waitForState(reduceAttempt,TaskAttemptState.RUNNING); sendFetchFailure(app,reduceAttempt,mapAttempt1); sendFetchFailure(app,reduceAttempt,mapAttempt1); sendFetchFailure(app,reduceAttempt,mapAttempt1); app.waitForState(mapTask,TaskState.RUNNING); app.stop(); app=new MRAppWithHistory(1,1,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); 
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",2,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask=it.next(); reduceTask=it.next(); app.waitForState(mapTask,TaskState.RUNNING); mapAttempt1=mapTask.getAttempts().values().iterator().next(); app.waitForState(mapAttempt1,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask,TaskState.SUCCEEDED); reduceAttempt=reduceTask.getAttempts().values().iterator().next(); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceAttempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(job,JobState.SUCCEEDED); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Num completion events not correct",2,events.length); }

    Class: org.apache.hadoop.mapreduce.v2.app.TestJobEndNotifier

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // On the last AM retry with a normal shutdown, the job-end notification URL must
    // be hit exactly once with the substituted $jobId/$jobStatus query, and the
    // servlet must observe SUCCEEDED. sysexit() is stubbed out so the test JVM survives.
    @Test public void testNotificationOnLastRetryNormalShutdown() throws Exception { HttpServer2 server=startHttpServer(); MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,true,this.getClass().getName(),true,2,true)); doNothing().when(app).sysexit(); JobConf conf=new JobConf(); conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus"); JobImpl job=(JobImpl)app.submit(conf); app.waitForInternalState(job,JobStateInternal.SUCCEEDED); app.shutDownJob(); Assert.assertTrue(app.isLastAMRetry()); Assert.assertEquals(1,JobEndServlet.calledTimes); Assert.assertEquals("jobid=" + job.getID() + "&status=SUCCEEDED",JobEndServlet.requestUri.getQuery()); Assert.assertEquals(JobState.SUCCEEDED.toString(),JobEndServlet.foundJobState); server.stop(); }

    BooleanVerifier EqualityVerifier HybridVerifier 
    // Exercises notification retry policy against an unreachable URL. First config
    // (max 1 attempt, 5s interval) expects exactly 1 try taking >5s; second config
    // (3 retries at 3s) expects 3 tries taking >9s. Timing-based, so intervals are
    // asserted as lower bounds only.
    @Test public void testNotifyRetries() throws InterruptedException { JobConf conf=new JobConf(); conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"0"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"1"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,"http://nonexistent"); conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"5000"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"5000"); JobReport jobReport=mock(JobReport.class); long startTime=System.currentTimeMillis(); this.notificationCount=0; this.setConf(conf); this.notify(jobReport); long endTime=System.currentTimeMillis(); Assert.assertEquals("Only 1 try was expected but was : " + this.notificationCount,1,this.notificationCount); Assert.assertTrue("Should have taken more than 5 seconds it took " + (endTime - startTime),endTime - startTime > 5000); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS,"3"); conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS,"3"); conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL,"3000"); conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL,"3000"); startTime=System.currentTimeMillis(); this.notificationCount=0; this.setConf(conf); this.notify(jobReport); endTime=System.currentTimeMillis(); Assert.assertEquals("Only 3 retries were expected but was : " + this.notificationCount,3,this.notificationCount); Assert.assertTrue("Should have taken more than 9 seconds it took " + (endTime - startTime),endTime - startTime > 9000); }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    // Forces isLastAMRetry=true, then drives the job to REBOOT via JOB_AM_REBOOT and
    // waits for the service to stop. Asserts no job-end notification fired (servlet
    // untouched) and that isLastAMRetry() flipped to false after the reboot path.
    @Test public void testNotificationOnLastRetryUnregistrationFailure() throws Exception { HttpServer2 server=startHttpServer(); MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,2,false)); app.isLastAMRetry=true; doNothing().when(app).sysexit(); JobConf conf=new JobConf(); conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus"); JobImpl job=(JobImpl)app.submit(conf); app.waitForState(job,JobState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT)); app.waitForInternalState(job,JobStateInternal.REBOOT); app.waitForServiceToStop(10000); Assert.assertFalse(app.isLastAMRetry()); Assert.assertEquals(0,JobEndServlet.calledTimes); Assert.assertNull(JobEndServlet.requestUri); Assert.assertNull(JobEndServlet.foundJobState); server.stop(); }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    // Not the last AM retry: after JOB_AM_REBOOT and shutDownJob(), no job-end
    // notification must be sent (calledTimes==0, servlet state null) and
    // isLastAMRetry() stays false.
    @Test public void testAbsentNotificationOnNotLastRetryUnregistrationFailure() throws Exception { HttpServer2 server=startHttpServer(); MRApp app=spy(new MRAppWithCustomContainerAllocator(2,2,false,this.getClass().getName(),true,1,false)); doNothing().when(app).sysexit(); JobConf conf=new JobConf(); conf.set(JobContext.MR_JOB_END_NOTIFICATION_URL,JobEndServlet.baseUrl + "jobend?jobid=$jobId&status=$jobStatus"); JobImpl job=(JobImpl)app.submit(conf); app.waitForState(job,JobState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(app.getJobId(),JobEventType.JOB_AM_REBOOT)); app.waitForInternalState(job,JobStateInternal.REBOOT); app.shutDownJob(); app.waitForState(job,JobState.RUNNING); Assert.assertFalse(app.isLastAMRetry()); Assert.assertEquals(0,JobEndServlet.calledTimes); Assert.assertNull(JobEndServlet.requestUri); Assert.assertNull(JobEndServlet.foundJobState); server.stop(); }

    Class: org.apache.hadoop.mapreduce.v2.app.TestKill

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Kills one attempt of task1 while both tasks are blocked on a latch; the job
    // still SUCCEEDS. task1 ends with two attempts (KILLED then SUCCEEDED in map
    // iteration order), task2 with a single SUCCEEDED attempt.
    @Test public void testKillTaskAttempt() throws Exception { final CountDownLatch latch=new CountDownLatch(1); MRApp app=new BlockingMRApp(2,0,latch); Job job=app.submit(new Configuration()); app.waitForState(job,JobState.RUNNING); Map tasks=job.getTasks(); Assert.assertEquals("No of tasks is not correct",2,tasks.size()); Iterator it=tasks.values().iterator(); Task task1=it.next(); Task task2=it.next(); app.waitForState(task1,TaskState.SCHEDULED); app.waitForState(task2,TaskState.SCHEDULED); TaskAttempt attempt=task1.getAttempts().values().iterator().next(); app.getContext().getEventHandler().handle(new TaskAttemptEvent(attempt.getID(),TaskAttemptEventType.TA_KILL)); latch.countDown(); app.waitForState(job,JobState.SUCCEEDED); Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task1.getReport().getTaskState()); Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task2.getReport().getTaskState()); Map attempts=task1.getAttempts(); Assert.assertEquals("No of attempts is not correct",2,attempts.size()); Iterator iter=attempts.values().iterator(); Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,iter.next().getReport().getTaskAttemptState()); Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,iter.next().getReport().getTaskAttemptState()); attempts=task2.getAttempts(); Assert.assertEquals("No of attempts is not correct",1,attempts.size()); iter=attempts.values().iterator(); Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,iter.next().getReport().getTaskAttemptState()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Sends JOB_KILL while the single task is blocked; after releasing the latch the
    // job, its task, and its lone attempt must all end KILLED.
    @Test public void testKillJob() throws Exception { final CountDownLatch latch=new CountDownLatch(1); MRApp app=new BlockingMRApp(1,0,latch); Job job=app.submit(new Configuration()); app.waitForState(job,JobState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(job.getID(),JobEventType.JOB_KILL)); latch.countDown(); app.waitForState(job,JobState.KILLED); Map tasks=job.getTasks(); Assert.assertEquals("No of tasks is not correct",1,tasks.size()); Task task=tasks.values().iterator().next(); Assert.assertEquals("Task state not correct",TaskState.KILLED,task.getReport().getTaskState()); Map attempts=tasks.values().iterator().next().getAttempts(); Assert.assertEquals("No of attempts is not correct",1,attempts.size()); Iterator it=attempts.values().iterator(); Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,it.next().getReport().getTaskAttemptState()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Kills task1 via T_KILL while tasks are blocked; the job still SUCCEEDS with
    // task1 KILLED (one KILLED attempt, not retried) and task2 SUCCEEDED.
    @Test public void testKillTask() throws Exception { final CountDownLatch latch=new CountDownLatch(1); MRApp app=new BlockingMRApp(2,0,latch); Job job=app.submit(new Configuration()); app.waitForState(job,JobState.RUNNING); Map tasks=job.getTasks(); Assert.assertEquals("No of tasks is not correct",2,tasks.size()); Iterator it=tasks.values().iterator(); Task task1=it.next(); Task task2=it.next(); app.getContext().getEventHandler().handle(new TaskEvent(task1.getID(),TaskEventType.T_KILL)); latch.countDown(); app.waitForState(job,JobState.SUCCEEDED); Assert.assertEquals("Task state not correct",TaskState.KILLED,task1.getReport().getTaskState()); Assert.assertEquals("Task state not correct",TaskState.SUCCEEDED,task2.getReport().getTaskState()); Map attempts=task1.getAttempts(); Assert.assertEquals("No of attempts is not correct",1,attempts.size()); Iterator iter=attempts.values().iterator(); Assert.assertEquals("Attempt state not correct",TaskAttemptState.KILLED,iter.next().getReport().getTaskAttemptState()); attempts=task2.getAttempts(); Assert.assertEquals("No of attempts is not correct",1,attempts.size()); iter=attempts.values().iterator(); Assert.assertEquals("Attempt state not correct",TaskAttemptState.SUCCEEDED,iter.next().getReport().getTaskAttemptState()); }

    InternalCallVerifier EqualityVerifier 
    // Uses a custom dispatcher (MyAsyncDispatch) that holds TA_DONE on a latch, then
    // sends JOB_KILL while the map's TA_DONE is still queued. After releasing the
    // latch the job must end in internal state KILLED, i.e. the kill wins the race
    // against the in-flight completion.
    @Test public void testKillTaskWaitKillJobAfterTA_DONE() throws Exception { CountDownLatch latch=new CountDownLatch(1); final Dispatcher dispatcher=new MyAsyncDispatch(latch,TaskAttemptEventType.TA_DONE); MRApp app=new MRApp(1,1,false,this.getClass().getName(),true){ @Override public Dispatcher createDispatcher(){ return dispatcher; } } ; Job job=app.submit(new Configuration()); JobId jobId=app.getJobId(); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",2,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask=it.next(); Task reduceTask=it.next(); app.waitForState(mapTask,TaskState.RUNNING); app.waitForState(reduceTask,TaskState.RUNNING); TaskAttempt mapAttempt=mapTask.getAttempts().values().iterator().next(); app.waitForState(mapAttempt,TaskAttemptState.RUNNING); TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next(); app.waitForState(reduceAttempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(),TaskAttemptEventType.TA_DONE)); app.getContext().getEventHandler().handle(new JobEvent(jobId,JobEventType.JOB_KILL)); latch.countDown(); app.waitForInternalState((JobImpl)job,JobStateInternal.KILLED); }

    InternalCallVerifier EqualityVerifier 
    // Custom dispatcher: when TA_KILL targets reduce attempt 0/0, it first replays
    // TA_DONE / TA_CONTAINER_CLEANED / T_ATTEMPT_SUCCEEDED for that attempt and caches
    // the kill; the cached kill is re-dispatched only after T_ATTEMPT_SUCCEEDED is
    // seen. Verifies JOB_KILL still drives the job to internal KILLED even when the
    // kill arrives after the attempt has already succeeded. Dispatch order here is
    // the whole point of the test — do not simplify the anonymous dispatcher.
    @Test public void testKillTaskWait() throws Exception { final Dispatcher dispatcher=new AsyncDispatcher(){ private TaskAttemptEvent cachedKillEvent; @Override protected void dispatch( Event event){ if (event instanceof TaskAttemptEvent) { TaskAttemptEvent killEvent=(TaskAttemptEvent)event; if (killEvent.getType() == TaskAttemptEventType.TA_KILL) { TaskAttemptId taID=killEvent.getTaskAttemptID(); if (taID.getTaskId().getTaskType() == TaskType.REDUCE && taID.getTaskId().getId() == 0 && taID.getId() == 0) { super.dispatch(new TaskAttemptEvent(taID,TaskAttemptEventType.TA_DONE)); super.dispatch(new TaskAttemptEvent(taID,TaskAttemptEventType.TA_CONTAINER_CLEANED)); super.dispatch(new TaskTAttemptEvent(taID,TaskEventType.T_ATTEMPT_SUCCEEDED)); this.cachedKillEvent=killEvent; return; } } } else if (event instanceof TaskEvent) { TaskEvent taskEvent=(TaskEvent)event; if (taskEvent.getType() == TaskEventType.T_ATTEMPT_SUCCEEDED && this.cachedKillEvent != null) { super.dispatch(this.cachedKillEvent); return; } } super.dispatch(event); } } ; MRApp app=new MRApp(1,1,false,this.getClass().getName(),true){ @Override public Dispatcher createDispatcher(){ return dispatcher; } } ; Job job=app.submit(new Configuration()); JobId jobId=app.getJobId(); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",2,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask=it.next(); Task reduceTask=it.next(); app.waitForState(mapTask,TaskState.RUNNING); app.waitForState(reduceTask,TaskState.RUNNING); TaskAttempt mapAttempt=mapTask.getAttempts().values().iterator().next(); app.waitForState(mapAttempt,TaskAttemptState.RUNNING); TaskAttempt reduceAttempt=reduceTask.getAttempts().values().iterator().next(); app.waitForState(reduceAttempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapAttempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask,TaskState.SUCCEEDED); 
app.getContext().getEventHandler().handle(new JobEvent(jobId,JobEventType.JOB_KILL)); app.waitForInternalState((JobImpl)job,JobStateInternal.KILLED); }

    Class: org.apache.hadoop.mapreduce.v2.app.TestMRApp

    InternalCallVerifier EqualityVerifier 
    // Not the last retry: a JOB_AM_REBOOT event must leave the externally visible
    // job state at RUNNING (AM expects to be restarted).
    @Test public void testJobRebootNotLastRetryOnUnregistrationFailure() throws Exception { MRApp app=new MRApp(1,0,false,this.getClass().getName(),true); Job job=app.submit(new Configuration()); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",1,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task task=it.next(); app.waitForState(task,TaskState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT)); app.waitForState(job,JobState.RUNNING); }

    InternalCallVerifier EqualityVerifier 
    // Last retry (maxAppAttempts=2, unregistration fails): JOB_AM_REBOOT moves the
    // internal state to REBOOT while the external state still reports RUNNING.
    @Test public void testJobRebootOnLastRetryOnUnregistrationFailure() throws Exception { MRApp app=new MRApp(1,0,false,this.getClass().getName(),true,2,false); Configuration conf=new Configuration(); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",1,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task task=it.next(); app.waitForState(task,TaskState.RUNNING); app.getContext().getEventHandler().handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT)); app.waitForInternalState((JobImpl)job,JobStateInternal.REBOOT); app.waitForState(job,JobState.RUNNING); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Injecting an invalid T_SCHEDULE event at a task that is already RUNNING must
     * drive the job into the ERROR state.
     */
    @Test public void testJobError() throws Exception {
      // Single-map, zero-reduce app with auto-complete enabled.
      MRApp mrApp = new MRApp(1, 0, false, this.getClass().getName(), true);
      Job submittedJob = mrApp.submit(new Configuration());
      mrApp.waitForState(submittedJob, JobState.RUNNING);
      Assert.assertEquals("Num tasks not correct", 1, submittedJob.getTasks().size());
      Iterator taskIterator = submittedJob.getTasks().values().iterator();
      Task mapTask = (Task) taskIterator.next();
      mrApp.waitForState(mapTask, TaskState.RUNNING);
      // T_SCHEDULE is illegal for a RUNNING task; the state machine should error out.
      mrApp.getContext().getEventHandler().handle(
          new TaskEvent(mapTask.getID(), TaskEventType.T_SCHEDULE));
      mrApp.waitForState(submittedJob, JobState.ERROR);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // (Existing javadoc below.) Phase 1: two maps succeed on the same node; marking
    // that node UNHEALTHY kills both attempts (2 extra KILLED events) and re-runs the
    // maps. Phase 2: a restarted AM with recovery enabled must see the preserved
    // killed/success history (event counts 2 -> 3 -> 5) and complete the reduces.
    /** * The test verifies that the AM re-runs maps that have run on bad nodes. It * also verifies that the AM records all success/killed events so that reduces * are notified about map output status changes. It also verifies that the * re-run information is preserved across AM restart */ @Test public void testUpdatedNodes() throws Exception { int runCount=0; MRApp app=new MRAppWithHistory(2,2,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,0.5f); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",4,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask1=it.next(); Task mapTask2=it.next(); app.waitForState(mapTask1,TaskState.RUNNING); app.waitForState(mapTask2,TaskState.RUNNING); TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next(); TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next(); NodeId node1=task1Attempt.getNodeId(); NodeId node2=task2Attempt.getNodeId(); Assert.assertEquals(node1,node2); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(mapTask2,TaskState.SUCCEEDED); TaskAttemptCompletionEvent[] events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Expecting 2 completion events for success",2,events.length); ArrayList updatedNodes=new ArrayList(); NodeReport nr=RecordFactoryProvider.getRecordFactory(null).newRecordInstance(NodeReport.class); nr.setNodeId(node1); nr.setNodeState(NodeState.UNHEALTHY); updatedNodes.add(nr); app.getContext().getEventHandler().handle(new 
JobUpdatedNodesEvent(job.getID(),updatedNodes)); app.waitForState(task1Attempt,TaskAttemptState.KILLED); app.waitForState(task2Attempt,TaskAttemptState.KILLED); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Expecting 2 more completion events for killed",4,events.length); app.waitForState(mapTask1,TaskState.RUNNING); app.waitForState(mapTask2,TaskState.RUNNING); Iterator itr=mapTask1.getAttempts().values().iterator(); itr.next(); task1Attempt=itr.next(); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(mapTask2,TaskState.RUNNING); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Expecting 1 more completion events for success",5,events.length); app.stop(); app=new MRAppWithHistory(2,2,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",4,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask1=it.next(); mapTask2=it.next(); Task reduceTask1=it.next(); Task reduceTask2=it.next(); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(mapTask2,TaskState.RUNNING); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Expecting 2 completion events for killed & success of map1",2,events.length); task2Attempt=mapTask2.getAttempts().values().iterator().next(); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask2,TaskState.SUCCEEDED); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Expecting 1 more completion events for success",3,events.length); app.waitForState(reduceTask1,TaskState.RUNNING); 
app.waitForState(reduceTask2,TaskState.RUNNING); TaskAttempt task3Attempt=reduceTask1.getAttempts().values().iterator().next(); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(reduceTask1,TaskState.SUCCEEDED); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task3Attempt.getID(),TaskAttemptEventType.TA_KILL)); app.waitForState(reduceTask1,TaskState.SUCCEEDED); TaskAttempt task4Attempt=reduceTask2.getAttempts().values().iterator().next(); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task4Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(reduceTask2,TaskState.SUCCEEDED); events=job.getTaskAttemptCompletionEvents(0,100); Assert.assertEquals("Expecting 2 more completion events for reduce success",5,events.length); app.waitForState(job,JobState.SUCCEEDED); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Runs a 2-map/2-reduce job to completion and checks that the job's
     * recorded user name matches the JVM's "user.name" system property.
     */
    @Test
    public void testMapReduce() throws Exception {
        MRApp mrApp = new MRApp(2, 2, true, this.getClass().getName(), true);
        Job submittedJob = mrApp.submit(new Configuration());
        mrApp.waitForState(submittedJob, JobState.SUCCEEDED);
        mrApp.verifyCompleted();
        Assert.assertEquals(System.getProperty("user.name"), submittedJob.getUserName());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the job's externally visible state stays RUNNING until the
     * AM has successfully unregistered, and only then becomes SUCCEEDED.
     */
    @SuppressWarnings("resource")
    @Test
    public void testJobSuccess() throws Exception {
        MRApp mrApp = new MRApp(2, 2, true, this.getClass().getName(), true, false);
        JobImpl jobImpl = (JobImpl) mrApp.submit(new Configuration());
        mrApp.waitForInternalState(jobImpl, JobStateInternal.SUCCEEDED);
        // External state lags the internal one until unregistration completes.
        Assert.assertEquals(JobState.RUNNING, jobImpl.getState());
        mrApp.successfullyUnregistered.set(true);
        mrApp.waitForState(jobImpl, JobState.SUCCEEDED);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Drives a single map attempt into COMMIT_PENDING, sends a duplicate
     * commit-pending event (which must leave the state unchanged), then
     * completes the attempt and expects the job to succeed.
     */
    @Test
    public void testCommitPending() throws Exception {
        MRApp mrApp = new MRApp(1, 0, false, this.getClass().getName(), true);
        Job submittedJob = mrApp.submit(new Configuration());
        mrApp.waitForState(submittedJob, JobState.RUNNING);
        Assert.assertEquals("Num tasks not correct", 1, submittedJob.getTasks().size());
        Iterator taskIter = submittedJob.getTasks().values().iterator();
        Task mapTask = taskIter.next();
        mrApp.waitForState(mapTask, TaskState.RUNNING);
        TaskAttempt mapAttempt = mapTask.getAttempts().values().iterator().next();
        mrApp.waitForState(mapAttempt, TaskAttemptState.RUNNING);
        // First commit-pending notification moves the attempt to COMMIT_PENDING.
        mrApp.getContext().getEventHandler().handle(
            new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_COMMIT_PENDING));
        mrApp.waitForState(mapAttempt, TaskAttemptState.COMMIT_PENDING);
        // A repeated notification must be a no-op: still COMMIT_PENDING.
        mrApp.getContext().getEventHandler().handle(
            new TaskAttemptEvent(mapAttempt.getID(), TaskAttemptEventType.TA_COMMIT_PENDING));
        mrApp.waitForState(mapAttempt, TaskAttemptState.COMMIT_PENDING);
        mrApp.getContext().getEventHandler().handle(
            new TaskAttemptEvent(mapTask.getAttempts().values().iterator().next().getID(),
                TaskAttemptEventType.TA_DONE));
        mrApp.waitForState(submittedJob, JobState.SUCCEEDED);
    }

    Class: org.apache.hadoop.mapreduce.v2.app.TestMRAppComponentDependencies

    InternalCallVerifier EqualityVerifier 
    /**
     * Runs a tiny job, then polls (up to 20s) until both tracked services have
     * stopped, asserting the JobHistoryEventHandler stopped first (order 1)
     * and the client service second (order 2).
     */
    @Test(timeout = 20000)
    public void testComponentStopOrder() throws Exception {
        @SuppressWarnings("resource")
        TestMRApp testApp = new TestMRApp(1, 1, true, this.getClass().getName(), true);
        JobImpl jobImpl = (JobImpl) testApp.submit(new Configuration());
        testApp.waitForState(jobImpl, JobState.SUCCEEDED);
        testApp.verifyCompleted();
        // Give the components up to 20 seconds to finish stopping.
        int remainingMs = 20 * 1000;
        while (remainingMs > 0 && testApp.numStops < 2) {
            Thread.sleep(100);
            remainingMs -= 100;
        }
        Assert.assertEquals(1, testApp.JobHistoryEventHandlerStopped);
        Assert.assertEquals(2, testApp.clientServiceStopped);
    }

    Class: org.apache.hadoop.mapreduce.v2.app.TestMRAppMaster

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * When both the start-commit and end-commit-failure marker files already
     * exist in the staging area, a restarted AM must refuse to re-run the
     * commit: startup throws, errorHappenedShutDown is set, and the job is
     * forced into the FAILED state.
     */
    @Test
    public void testMRAppMasterFailLock() throws IOException, InterruptedException {
        String attemptIdStr = "appattempt_1317529182569_0004_000002";
        String contIdStr = "container_1317529182569_0004_000002_1";
        String user = "TestAppMasterUser";
        JobConf jobConf = new JobConf();
        jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
        ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
        JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(attemptId.getApplicationId()));
        // Simulate a previous AM that started a commit and then recorded failure.
        Path startFile = MRApps.getStartJobCommitFile(jobConf, user, jobId);
        Path endFile = MRApps.getEndJobCommitFailureFile(jobConf, user, jobId);
        FileSystem fileSys = FileSystem.get(jobConf);
        fileSys.create(startFile).close();
        fileSys.create(endFile).close();
        ContainerId contId = ConverterUtils.toContainerId(contIdStr);
        MRAppMaster appMaster = new MRAppMasterTest(attemptId, contId, "host", -1, -1,
            System.currentTimeMillis(), false, false);
        boolean sawException = false;
        try {
            MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
        } catch (IOException e) {
            LOG.info("Caught expected Exception", e);
            sawException = true;
        }
        assertTrue(sawException);
        assertTrue(appMaster.errorHappenedShutDown);
        assertEquals(JobStateInternal.FAILED, appMaster.forcedState);
        appMaster.stop();
        verifyFailedStatus((MRAppMasterTest) appMaster, "FAILED");
    }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With the staging directory deleted, starting the AM must fail with an
     * IOException, set errorHappenedShutDown, and force the job into ERROR.
     */
    @Test
    public void testMRAppMasterMissingStaging() throws IOException, InterruptedException {
        String attemptIdStr = "appattempt_1317529182569_0004_000002";
        String contIdStr = "container_1317529182569_0004_000002_1";
        String user = "TestAppMasterUser";
        JobConf jobConf = new JobConf();
        jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
        ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
        // Remove the staging directory entirely so the AM cannot find it.
        File stagingRoot = new File(stagingDir);
        if (stagingRoot.exists()) {
            FileUtils.deleteDirectory(stagingRoot);
        }
        ContainerId contId = ConverterUtils.toContainerId(contIdStr);
        MRAppMaster appMaster = new MRAppMasterTest(attemptId, contId, "host", -1, -1,
            System.currentTimeMillis(), false, false);
        boolean sawException = false;
        try {
            MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
        } catch (IOException e) {
            LOG.info("Caught expected Exception", e);
            sawException = true;
        }
        assertTrue(sawException);
        assertTrue(appMaster.errorHappenedShutDown);
        assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
        appMaster.stop();
    }

    APIUtilityVerifier IterativeVerifier EqualityVerifier 
    /**
     * Starts the same AM attempt several times and checks isLastAMRetry after
     * each start.
     * NOTE(review): expectedBools is all-false here, so every iteration asserts
     * isLastAMRetry() == false — confirm this matches the intended
     * max-app-attempts configuration. Also note the local File 'stagingDir'
     * deliberately shadows the String field of the same name referenced on the
     * preceding conf.set(...) call.
     */
    @Test(timeout=30000) public void testMRAppMasterMaxAppAttempts() throws IOException, InterruptedException { Boolean[] expectedBools=new Boolean[]{false,false,false}; String applicationAttemptIdStr="appattempt_1317529182569_0004_000002"; String containerIdStr="container_1317529182569_0004_000002_1"; String userName="TestAppMasterUser"; ApplicationAttemptId applicationAttemptId=ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr); ContainerId containerId=ConverterUtils.toContainerId(containerIdStr); JobConf conf=new JobConf(); conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir); File stagingDir=new File(MRApps.getStagingAreaDir(conf,userName).toString()); stagingDir.mkdirs(); for (int i=0; i < expectedBools.length; ++i) { MRAppMasterTest appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,true); MRAppMaster.initAndStartAppMaster(appMaster,conf,userName); assertEquals("isLastAMRetry is correctly computed.",expectedBools[i],appMaster.isLastAMRetry()); } }

    APIUtilityVerifier EqualityVerifier 
    /**
     * Starting the AM on behalf of another user must resolve the staging
     * directory to that user's ".staging" subdirectory under the configured
     * staging root.
     */
    @Test
    public void testMRAppMasterForDifferentUser() throws IOException, InterruptedException {
        String attemptIdStr = "appattempt_1317529182569_0004_000001";
        String contIdStr = "container_1317529182569_0004_000001_1";
        String user = "TestAppMasterUser";
        ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId(attemptIdStr);
        ContainerId contId = ConverterUtils.toContainerId(contIdStr);
        MRAppMasterTest appMaster = new MRAppMasterTest(attemptId, contId, "host", -1, -1,
            System.currentTimeMillis());
        JobConf jobConf = new JobConf();
        jobConf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
        MRAppMaster.initAndStartAppMaster(appMaster, jobConf, user);
        // Expected layout: <stagingDir>/<user>/.staging
        Path perUserDir = new Path(stagingDir, user);
        Path expectedStaging = new Path(perUserDir, ".staging");
        assertEquals(expectedStaging.toString(), appMaster.stagingDirPath.toString());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * When both the start-commit and end-commit-success marker files exist,
     * a restarted AM must treat the job as already committed: startup throws,
     * errorHappenedShutDown is set, and the forced state is SUCCEEDED.
     * NOTE(review): verifyFailedStatus(...) is also used for the "SUCCEEDED"
     * history string here — the helper checks the reported status string, not
     * only failures.
     */
    @Test public void testMRAppMasterSuccessLock() throws IOException, InterruptedException { String applicationAttemptIdStr="appattempt_1317529182569_0004_000002"; String containerIdStr="container_1317529182569_0004_000002_1"; String userName="TestAppMasterUser"; JobConf conf=new JobConf(); conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir); ApplicationAttemptId applicationAttemptId=ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr); JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId())); Path start=MRApps.getStartJobCommitFile(conf,userName,jobId); Path end=MRApps.getEndJobCommitSuccessFile(conf,userName,jobId); FileSystem fs=FileSystem.get(conf); fs.create(start).close(); fs.create(end).close(); ContainerId containerId=ConverterUtils.toContainerId(containerIdStr); MRAppMaster appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,false); boolean caught=false; try { MRAppMaster.initAndStartAppMaster(appMaster,conf,userName); } catch ( IOException e) { LOG.info("Caught expected Exception",e); caught=true; } assertTrue(caught); assertTrue(appMaster.errorHappenedShutDown); assertEquals(JobStateInternal.SUCCEEDED,appMaster.forcedState); appMaster.stop(); verifyFailedStatus((MRAppMasterTest)appMaster,"SUCCEEDED"); }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies credential propagation through AM startup: a token and a secret
     * key written to the file named by HADOOP_TOKEN_FILE_LOCATION (injected via
     * setNewEnvironmentHack) must show up in the AM's Credentials, in the job
     * conf's credentials, and — together with the AMRM token — in the AM's UGI
     * (hence 2 tokens there vs. 1 elsewhere).
     */
    @Test public void testMRAppMasterCredentials() throws Exception { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); Credentials credentials=new Credentials(); byte[] identifier="MyIdentifier".getBytes(); byte[] password="MyPassword".getBytes(); Text kind=new Text("MyTokenKind"); Text service=new Text("host:port"); Token myToken=new Token(identifier,password,kind,service); Text tokenAlias=new Text("myToken"); credentials.addToken(tokenAlias,myToken); Text appTokenService=new Text("localhost:0"); Token appToken=new Token(identifier,password,AMRMTokenIdentifier.KIND_NAME,appTokenService); credentials.addToken(appTokenService,appToken); Text keyAlias=new Text("mySecretKeyAlias"); credentials.addSecretKey(keyAlias,"mySecretKey".getBytes()); Token storedToken=credentials.getToken(tokenAlias); JobConf conf=new JobConf(); Path tokenFilePath=new Path(testDir.getAbsolutePath(),"tokens-file"); Map newEnv=new HashMap(); newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION,tokenFilePath.toUri().getPath()); setNewEnvironmentHack(newEnv); credentials.writeTokenStorageFile(tokenFilePath,conf); ApplicationId appId=ApplicationId.newInstance(12345,56); ApplicationAttemptId applicationAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId containerId=ContainerId.newInstance(applicationAttemptId,546); String userName=UserGroupInformation.getCurrentUser().getShortUserName(); File stagingDir=new File(MRApps.getStagingAreaDir(conf,userName).toString()); stagingDir.mkdirs(); UserGroupInformation.setLoginUser(null); MRAppMasterTest appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,true); MRAppMaster.initAndStartAppMaster(appMaster,conf,userName); Credentials appMasterCreds=appMaster.getCredentials(); Assert.assertNotNull(appMasterCreds); Assert.assertEquals(1,appMasterCreds.numberOfSecretKeys()); Assert.assertEquals(1,appMasterCreds.numberOfTokens()); Token 
usedToken=appMasterCreds.getToken(tokenAlias); Assert.assertNotNull(usedToken); Assert.assertEquals(storedToken,usedToken); byte[] usedKey=appMasterCreds.getSecretKey(keyAlias); Assert.assertNotNull(usedKey); Assert.assertEquals("mySecretKey",new String(usedKey)); Credentials confCredentials=conf.getCredentials(); Assert.assertEquals(1,confCredentials.numberOfSecretKeys()); Assert.assertEquals(1,confCredentials.numberOfTokens()); Assert.assertEquals(storedToken,confCredentials.getToken(tokenAlias)); Assert.assertEquals("mySecretKey",new String(confCredentials.getSecretKey(keyAlias))); Credentials ugiCredentials=appMaster.getUgi().getCredentials(); Assert.assertEquals(1,ugiCredentials.numberOfSecretKeys()); Assert.assertEquals(2,ugiCredentials.numberOfTokens()); Assert.assertEquals(storedToken,ugiCredentials.getToken(tokenAlias)); Assert.assertEquals(appToken,ugiCredentials.getToken(appTokenService)); Assert.assertEquals("mySecretKey",new String(ugiCredentials.getSecretKey(keyAlias))); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With only the start-commit marker present (a commit was in flight when
     * the previous AM died), a restarted AM cannot tell whether the commit
     * completed: startup throws and the job is forced into the ERROR state.
     */
    @Test public void testMRAppMasterMidLock() throws IOException, InterruptedException { String applicationAttemptIdStr="appattempt_1317529182569_0004_000002"; String containerIdStr="container_1317529182569_0004_000002_1"; String userName="TestAppMasterUser"; JobConf conf=new JobConf(); conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir); ApplicationAttemptId applicationAttemptId=ConverterUtils.toApplicationAttemptId(applicationAttemptIdStr); JobId jobId=TypeConverter.toYarn(TypeConverter.fromYarn(applicationAttemptId.getApplicationId())); Path start=MRApps.getStartJobCommitFile(conf,userName,jobId); FileSystem fs=FileSystem.get(conf); fs.create(start).close(); ContainerId containerId=ConverterUtils.toContainerId(containerIdStr); MRAppMaster appMaster=new MRAppMasterTest(applicationAttemptId,containerId,"host",-1,-1,System.currentTimeMillis(),false,false); boolean caught=false; try { MRAppMaster.initAndStartAppMaster(appMaster,conf,userName); } catch ( IOException e) { LOG.info("Caught expected Exception",e); caught=true; } assertTrue(caught); assertTrue(appMaster.errorHappenedShutDown); assertEquals(JobStateInternal.ERROR,appMaster.forcedState); appMaster.stop(); verifyFailedStatus((MRAppMasterTest)appMaster,"FAILED"); }

    Class: org.apache.hadoop.mapreduce.v2.app.TestMRClientService

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * End-to-end exercise of the MRClientProtocol RPCs against a running AM:
     * after pushing a diagnostic update and a status update for the single map
     * attempt, the test fetches counters, job report, completion events,
     * diagnostics, task-attempt report and task reports (map and reduce) over
     * RPC and checks each; finally it completes the attempt so the job
     * succeeds.
     */
    @Test public void test() throws Exception { MRAppWithClientService app=new MRAppWithClientService(1,0,false); Configuration conf=new Configuration(); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("Num tasks not correct",1,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task task=it.next(); app.waitForState(task,TaskState.RUNNING); TaskAttempt attempt=task.getAttempts().values().iterator().next(); app.waitForState(attempt,TaskAttemptState.RUNNING); String diagnostic1="Diagnostic1"; String diagnostic2="Diagnostic2"; app.getContext().getEventHandler().handle(new TaskAttemptDiagnosticsUpdateEvent(attempt.getID(),diagnostic1)); TaskAttemptStatus taskAttemptStatus=new TaskAttemptStatus(); taskAttemptStatus.id=attempt.getID(); taskAttemptStatus.progress=0.5f; taskAttemptStatus.stateString="RUNNING"; taskAttemptStatus.taskState=TaskAttemptState.RUNNING; taskAttemptStatus.phase=Phase.MAP; app.getContext().getEventHandler().handle(new TaskAttemptStatusUpdateEvent(attempt.getID(),taskAttemptStatus)); YarnRPC rpc=YarnRPC.create(conf); MRClientProtocol proxy=(MRClientProtocol)rpc.getProxy(MRClientProtocol.class,app.clientService.getBindAddress(),conf); GetCountersRequest gcRequest=recordFactory.newRecordInstance(GetCountersRequest.class); gcRequest.setJobId(job.getID()); Assert.assertNotNull("Counters is null",proxy.getCounters(gcRequest).getCounters()); GetJobReportRequest gjrRequest=recordFactory.newRecordInstance(GetJobReportRequest.class); gjrRequest.setJobId(job.getID()); JobReport jr=proxy.getJobReport(gjrRequest).getJobReport(); verifyJobReport(jr); GetTaskAttemptCompletionEventsRequest gtaceRequest=recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class); gtaceRequest.setJobId(job.getID()); gtaceRequest.setFromEventId(0); gtaceRequest.setMaxEvents(10); Assert.assertNotNull("TaskCompletionEvents is null",proxy.getTaskAttemptCompletionEvents(gtaceRequest).getCompletionEventList()); 
GetDiagnosticsRequest gdRequest=recordFactory.newRecordInstance(GetDiagnosticsRequest.class); gdRequest.setTaskAttemptId(attempt.getID()); Assert.assertNotNull("Diagnostics is null",proxy.getDiagnostics(gdRequest).getDiagnosticsList()); GetTaskAttemptReportRequest gtarRequest=recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class); gtarRequest.setTaskAttemptId(attempt.getID()); TaskAttemptReport tar=proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport(); verifyTaskAttemptReport(tar); GetTaskReportRequest gtrRequest=recordFactory.newRecordInstance(GetTaskReportRequest.class); gtrRequest.setTaskId(task.getID()); Assert.assertNotNull("TaskReport is null",proxy.getTaskReport(gtrRequest).getTaskReport()); GetTaskReportsRequest gtreportsRequest=recordFactory.newRecordInstance(GetTaskReportsRequest.class); gtreportsRequest.setJobId(job.getID()); gtreportsRequest.setTaskType(TaskType.MAP); Assert.assertNotNull("TaskReports for map is null",proxy.getTaskReports(gtreportsRequest).getTaskReportList()); gtreportsRequest=recordFactory.newRecordInstance(GetTaskReportsRequest.class); gtreportsRequest.setJobId(job.getID()); gtreportsRequest.setTaskType(TaskType.REDUCE); Assert.assertNotNull("TaskReports for reduce is null",proxy.getTaskReports(gtreportsRequest).getTaskReportList()); List diag=proxy.getDiagnostics(gdRequest).getDiagnosticsList(); Assert.assertEquals("Num diagnostics not correct",1,diag.size()); Assert.assertEquals("Diag 1 not correct",diagnostic1,diag.get(0).toString()); TaskReport taskReport=proxy.getTaskReport(gtrRequest).getTaskReport(); Assert.assertEquals("Num diagnostics not correct",1,taskReport.getDiagnosticsCount()); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(job,JobState.SUCCEEDED); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A user granted only VIEW_JOB access must be able to view the job but be
     * denied every mutating RPC: killJob, killTask, killTaskAttempt and
     * failTaskAttempt must each raise AccessControlException.
     */
    @Test
    public void testViewAclOnlyCannotModify() throws Exception {
        final MRAppWithClientService app = new MRAppWithClientService(1, 0, false);
        final Configuration conf = new Configuration();
        conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
        conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser");
        Job job = app.submit(conf);
        app.waitForState(job, JobState.RUNNING);
        Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size());
        Iterator it = job.getTasks().values().iterator();
        Task task = it.next();
        app.waitForState(task, TaskState.RUNNING);
        TaskAttempt attempt = task.getAttempts().values().iterator().next();
        app.waitForState(attempt, TaskAttemptState.RUNNING);
        UserGroupInformation viewOnlyUser =
            UserGroupInformation.createUserForTesting("viewonlyuser", new String[]{});
        Assert.assertTrue("viewonlyuser cannot view job",
            job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB));
        Assert.assertFalse("viewonlyuser can modify job",
            job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB));
        // Build the client proxy as the view-only user so RPCs carry its UGI.
        MRClientProtocol client = viewOnlyUser.doAs(new PrivilegedExceptionAction() {
            @Override
            public MRClientProtocol run() throws Exception {
                YarnRPC rpc = YarnRPC.create(conf);
                return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
                    app.clientService.getBindAddress(), conf);
            }
        });
        KillJobRequest killJobRequest = recordFactory.newRecordInstance(KillJobRequest.class);
        killJobRequest.setJobId(app.getJobId());
        try {
            client.killJob(killJobRequest);
            fail("viewonlyuser killed job");
        } catch (AccessControlException e) {
            // expected: a view-only user may not kill the job
        }
        KillTaskRequest killTaskRequest = recordFactory.newRecordInstance(KillTaskRequest.class);
        killTaskRequest.setTaskId(task.getID());
        try {
            client.killTask(killTaskRequest);
            fail("viewonlyuser killed task");
        } catch (AccessControlException e) {
            // expected: a view-only user may not kill a task
        }
        KillTaskAttemptRequest killTaskAttemptRequest =
            recordFactory.newRecordInstance(KillTaskAttemptRequest.class);
        killTaskAttemptRequest.setTaskAttemptId(attempt.getID());
        try {
            client.killTaskAttempt(killTaskAttemptRequest);
            fail("viewonlyuser killed task attempt");
        } catch (AccessControlException e) {
            // expected: a view-only user may not kill a task attempt
        }
        FailTaskAttemptRequest failTaskAttemptRequest =
            recordFactory.newRecordInstance(FailTaskAttemptRequest.class);
        failTaskAttemptRequest.setTaskAttemptId(attempt.getID());
        try {
            client.failTaskAttempt(failTaskAttemptRequest);
            // Fixed copy-paste: this branch exercises failTaskAttempt, not kill.
            fail("viewonlyuser failed task attempt");
        } catch (AccessControlException e) {
            // expected: a view-only user may not fail a task attempt
        }
    }

    Class: org.apache.hadoop.mapreduce.v2.app.TestRecovery

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Output recovery across an AM restart: in generation 1 the map and the
     * first reduce complete (reduce output written via writeOutput); after the
     * restart, generation 2 must recover both completed tasks — including the
     * map attempt's shuffle port (5467) — run only the remaining reduce, and
     * produce output that passes validateOutput().
     */
    @Test public void testOutputRecovery() throws Exception { int runCount=0; MRApp app=new MRAppWithHistory(1,2,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask1=it.next(); Task reduceTask1=it.next(); app.waitForState(mapTask1,TaskState.RUNNING); TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next(); app.waitForState(task1Attempt1,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); Assert.assertEquals(5467,task1Attempt1.getShufflePort()); app.waitForState(reduceTask1,TaskState.RUNNING); TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next(); writeOutput(reduce1Attempt1,conf); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(reduceTask1,TaskState.SUCCEEDED); app.stop(); app=new MRAppWithHistory(1,2,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask1=it.next(); reduceTask1=it.next(); Task 
reduceTask2=it.next(); app.waitForState(mapTask1,TaskState.SUCCEEDED); task1Attempt1=mapTask1.getAttempts().values().iterator().next(); Assert.assertEquals(5467,task1Attempt1.getShufflePort()); app.waitForState(reduceTask1,TaskState.SUCCEEDED); app.waitForState(reduceTask2,TaskState.RUNNING); TaskAttempt reduce2Attempt=reduceTask2.getAttempts().values().iterator().next(); app.waitForState(reduce2Attempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce2Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(reduceTask2,TaskState.SUCCEEDED); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); validateOutput(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Recovery when the previous AM stored no shuffle secret: map1 succeeds in
     * generation 1, but after the restart generation 2 waits for BOTH maps in
     * the RUNNING state — i.e. the completed map is not recovered and must be
     * rerun — before the reduce can run and the job succeeds.
     */
    @Test(timeout=30000) public void testRecoveryWithoutShuffleSecret() throws Exception { int runCount=0; MRApp app=new MRAppNoShuffleSecret(2,1,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask1=it.next(); Task mapTask2=it.next(); Task reduceTask=it.next(); app.waitForState(mapTask1,TaskState.RUNNING); app.waitForState(mapTask2,TaskState.RUNNING); TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next(); TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next(); app.waitForState(task1Attempt,TaskAttemptState.RUNNING); app.waitForState(task2Attempt,TaskAttemptState.RUNNING); Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState()); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.stop(); app=new MRAppNoShuffleSecret(2,1,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask1=it.next(); mapTask2=it.next(); reduceTask=it.next(); 
app.waitForState(mapTask1,TaskState.RUNNING); app.waitForState(mapTask2,TaskState.RUNNING); task2Attempt=mapTask2.getAttempts().values().iterator().next(); app.waitForState(task2Attempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask2,TaskState.SUCCEEDED); app.waitForState(mapTask1,TaskState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(reduceTask,TaskState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that recovery is controlled through the custom committer's
     * config property: with "want.am.recovery" set to true and a custom
     * OutputCommitter installed, the two maps finished in generation 1 are
     * recovered as SUCCEEDED after the AM restart, only the third map reruns,
     * and the job succeeds in the second generation. AM has 3 maps, 0 reduces.
     * @throws Exception
     */
    @Test public void testRecoverySuccessUsingCustomOutputCommitter() throws Exception { int runCount=0; MRApp app=new MRAppWithHistory(3,0,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); conf.setBoolean("want.am.recovery",true); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask1=it.next(); Task mapTask2=it.next(); Task mapTask3=it.next(); app.waitForState(mapTask1,TaskState.RUNNING); app.waitForState(mapTask2,TaskState.RUNNING); app.waitForState(mapTask3,TaskState.RUNNING); TaskAttempt task1Attempt=mapTask1.getAttempts().values().iterator().next(); TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next(); TaskAttempt task3Attempt=mapTask3.getAttempts().values().iterator().next(); app.waitForState(task1Attempt,TaskAttemptState.RUNNING); app.waitForState(task2Attempt,TaskAttemptState.RUNNING); app.waitForState(task3Attempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(mapTask2,TaskState.SUCCEEDED); app.stop(); app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); 
conf.setClass("mapred.output.committer.class",TestFileOutputCommitter.class,org.apache.hadoop.mapred.OutputCommitter.class); conf.setBoolean("want.am.recovery",true); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); conf.setInt(MRJobConfig.NUM_REDUCES,0); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask1=it.next(); mapTask2=it.next(); mapTask3=it.next(); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(mapTask2,TaskState.SUCCEEDED); app.waitForState(mapTask3,TaskState.RUNNING); task3Attempt=mapTask3.getAttempts().values().iterator().next(); app.waitForState(task3Attempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask3,TaskState.SUCCEEDED); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Maps-only output recovery: map1 succeeds in generation 1 after
     * writeBadOutput (deliberately bad task output); after the restart,
     * generation 2 must recover map1 as SUCCEEDED with its shuffle port (5467)
     * preserved, run map2 and the reduce (which writes the good output), and
     * still pass validateOutput().
     */
    @Test public void testOutputRecoveryMapsOnly() throws Exception { int runCount=0; MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask1=it.next(); Task mapTask2=it.next(); Task reduceTask1=it.next(); app.waitForState(mapTask1,TaskState.RUNNING); TaskAttempt task1Attempt1=mapTask1.getAttempts().values().iterator().next(); app.waitForState(task1Attempt1,TaskAttemptState.RUNNING); writeBadOutput(task1Attempt1,conf); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask1,TaskState.SUCCEEDED); Assert.assertEquals(5467,task1Attempt1.getShufflePort()); app.stop(); app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask1=it.next(); mapTask2=it.next(); reduceTask1=it.next(); app.waitForState(mapTask1,TaskState.SUCCEEDED); task1Attempt1=mapTask1.getAttempts().values().iterator().next(); Assert.assertEquals(5467,task1Attempt1.getShufflePort()); app.waitForState(mapTask2,TaskState.RUNNING); TaskAttempt 
task2Attempt1=mapTask2.getAttempts().values().iterator().next(); app.waitForState(task2Attempt1,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task2Attempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask2,TaskState.SUCCEEDED); Assert.assertEquals(5467,task2Attempt1.getShufflePort()); app.waitForState(reduceTask1,TaskState.RUNNING); TaskAttempt reduce1Attempt1=reduceTask1.getAttempts().values().iterator().next(); writeOutput(reduce1Attempt1,conf); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduce1Attempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(reduceTask1,TaskState.SUCCEEDED); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); validateOutput(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * AM with 2 maps and 1 reduce. A speculative second attempt is requested
     * for map1; after both of map1's attempts are RUNNING, the first attempt
     * succeeds and the AM stops. The second generation recovers map1 as
     * SUCCEEDED, finishes map2 and the reduce, and the test verifies that the
     * job/task start and finish times survive recovery and that getAMInfos()
     * reports both AM attempts with consistent attempt ids, container ids,
     * NM host/ports, and ordered start times.
     * (Previous comment described a different test's scenario — no attempt
     * fails, disappears, or is killed in this method.)
     * @throws Exception
     */
    @Test public void testSpeculative() throws Exception { int runCount=0; long am1StartTimeEst=System.currentTimeMillis(); MRApp app=new MRAppWithHistory(2,1,false,this.getClass().getName(),true,++runCount); Configuration conf=new Configuration(); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING); long jobStartTime=job.getReport().getStartTime(); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); Iterator it=job.getTasks().values().iterator(); Task mapTask1=it.next(); Task mapTask2=it.next(); Task reduceTask=it.next(); app.waitForState(mapTask1,TaskState.RUNNING); app.waitForState(mapTask2,TaskState.RUNNING); app.getContext().getEventHandler().handle(new TaskEvent(mapTask1.getID(),TaskEventType.T_ADD_SPEC_ATTEMPT)); int timeOut=0; while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) { Thread.sleep(1000); LOG.info("Waiting for next attempt to start"); } Iterator t1it=mapTask1.getAttempts().values().iterator(); TaskAttempt task1Attempt1=t1it.next(); TaskAttempt task1Attempt2=t1it.next(); TaskAttempt task2Attempt=mapTask2.getAttempts().values().iterator().next(); ContainerId t1a2contId=task1Attempt2.getAssignedContainerID(); LOG.info(t1a2contId.toString()); LOG.info(task1Attempt1.getID().toString()); LOG.info(task1Attempt2.getID().toString()); app.getContext().getEventHandler().handle(new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(),runCount)); app.waitForState(task1Attempt1,TaskAttemptState.RUNNING); 
app.waitForState(task1Attempt2,TaskAttemptState.RUNNING); app.waitForState(task2Attempt,TaskAttemptState.RUNNING); Assert.assertEquals("Reduce Task state not correct",TaskState.RUNNING,reduceTask.getReport().getTaskState()); app.getContext().getEventHandler().handle(new TaskAttemptEvent(task1Attempt1.getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(task1Attempt1,TaskAttemptState.SUCCEEDED); app.waitForState(mapTask1,TaskState.SUCCEEDED); long task1StartTime=mapTask1.getReport().getStartTime(); long task1FinishTime=mapTask1.getReport().getFinishTime(); app.stop(); long am2StartTimeEst=System.currentTimeMillis(); app=new MRAppWithHistory(2,1,false,this.getClass().getName(),false,++runCount); conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,true); conf.setBoolean("mapred.mapper.new-api",true); conf.setBoolean("mapred.reducer.new-api",true); conf.set(FileOutputFormat.OUTDIR,outputDir.toString()); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); job=app.submit(conf); app.waitForState(job,JobState.RUNNING); Assert.assertEquals("No of tasks not correct",3,job.getTasks().size()); it=job.getTasks().values().iterator(); mapTask1=it.next(); mapTask2=it.next(); reduceTask=it.next(); app.waitForState(mapTask1,TaskState.SUCCEEDED); app.waitForState(mapTask2,TaskState.RUNNING); task2Attempt=mapTask2.getAttempts().values().iterator().next(); app.waitForState(task2Attempt,TaskAttemptState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(mapTask2,TaskState.SUCCEEDED); app.waitForState(reduceTask,TaskState.RUNNING); app.getContext().getEventHandler().handle(new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),TaskAttemptEventType.TA_DONE)); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); Assert.assertEquals("Job Start time not 
correct",jobStartTime,job.getReport().getStartTime()); Assert.assertEquals("Task Start time not correct",task1StartTime,mapTask1.getReport().getStartTime()); Assert.assertEquals("Task Finish time not correct",task1FinishTime,mapTask1.getReport().getFinishTime()); Assert.assertEquals(2,job.getAMInfos().size()); int attemptNum=1; for ( AMInfo amInfo : job.getAMInfos()) { Assert.assertEquals(attemptNum++,amInfo.getAppAttemptId().getAttemptId()); Assert.assertEquals(amInfo.getAppAttemptId(),amInfo.getContainerId().getApplicationAttemptId()); Assert.assertEquals(MRApp.NM_HOST,amInfo.getNodeManagerHost()); Assert.assertEquals(MRApp.NM_PORT,amInfo.getNodeManagerPort()); Assert.assertEquals(MRApp.NM_HTTP_PORT,amInfo.getNodeManagerHttpPort()); } long am1StartTimeReal=job.getAMInfos().get(0).getStartTime(); long am2StartTimeReal=job.getAMInfos().get(1).getStartTime(); Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst && am1StartTimeReal <= am2StartTimeEst); Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst && am2StartTimeReal <= System.currentTimeMillis()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * This test case primarily verifies if the recovery is controlled through
     * config property. In this case, recovery is turned OFF ("want.am.recovery"
     * is false with a custom output committer). AM with 3 maps and 0 reduces.
     * The AM crashes after the first two tasks finish; recovery must NOT be
     * attempted, so all three maps rerun fully in the second generation and
     * the job succeeds.
     * @throws Exception if the test fails
     */
    @Test
    public void testRecoveryFailsUsingCustomOutputCommitter() throws Exception {
      int runCount = 0;
      MRApp app = new MRAppWithHistory(3, 0, false, this.getClass().getName(), true, ++runCount);
      Configuration conf = new Configuration();
      conf.setClass("mapred.output.committer.class", TestFileOutputCommitter.class,
          org.apache.hadoop.mapred.OutputCommitter.class);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      conf.setBoolean("want.am.recovery", false);
      Job job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      // Typed iterator: a raw Iterator's next() returns Object and cannot be
      // assigned to Task without a cast (compile error in the original).
      Iterator<Task> it = job.getTasks().values().iterator();
      Task mapTask1 = it.next();
      Task mapTask2 = it.next();
      Task mapTask3 = it.next();
      app.waitForState(mapTask1, TaskState.RUNNING);
      app.waitForState(mapTask2, TaskState.RUNNING);
      app.waitForState(mapTask3, TaskState.RUNNING);
      TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
      TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
      TaskAttempt task3Attempt = mapTask3.getAttempts().values().iterator().next();
      app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
      app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
      app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
      // Finish the first two maps, then crash the AM.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt.getID(), TaskAttemptEventType.TA_DONE));
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task2Attempt.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.waitForState(mapTask2, TaskState.SUCCEEDED);
      app.stop();
      // Second generation: recovery is disabled, so all tasks must rerun.
      app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
      conf = new Configuration();
      conf.setClass("mapred.output.committer.class", TestFileOutputCommitter.class,
          org.apache.hadoop.mapred.OutputCommitter.class);
      conf.setBoolean("want.am.recovery", false);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      // Forcing the number of reduces to zero overrides the app's constructor args.
      conf.setInt(MRJobConfig.NUM_REDUCES, 0);
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      it = job.getTasks().values().iterator();
      mapTask1 = it.next();
      mapTask2 = it.next();
      mapTask3 = it.next();
      // No recovery happened: all three maps are RUNNING again.
      app.waitForState(mapTask1, TaskState.RUNNING);
      app.waitForState(mapTask2, TaskState.RUNNING);
      app.waitForState(mapTask3, TaskState.RUNNING);
      task3Attempt = mapTask3.getAttempts().values().iterator().next();
      app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapTask1.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask3, TaskState.SUCCEEDED);
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Recovery with the old (mapred) API and committer. AM with 1 map and
     * 2 reduces: the map and the first reduce succeed, the AM crashes, and
     * the second generation must recover both completed tasks (including the
     * map's shuffle port) and only rerun the second reduce.
     * @throws Exception if the test fails
     */
    @Test
    public void testRecoveryWithOldCommiter() throws Exception {
      int runCount = 0;
      MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), true, ++runCount);
      Configuration conf = new Configuration();
      conf.setBoolean("mapred.mapper.new-api", false);
      conf.setBoolean("mapred.reducer.new-api", false);
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      Job job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      // Typed iterator: a raw Iterator's next() returns Object and cannot be
      // assigned to Task without a cast (compile error in the original).
      Iterator<Task> it = job.getTasks().values().iterator();
      Task mapTask1 = it.next();
      Task reduceTask1 = it.next();
      app.waitForState(mapTask1, TaskState.RUNNING);
      TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
      app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      // MRApp assigns this fixed shuffle port; it must survive recovery.
      Assert.assertEquals(5467, task1Attempt1.getShufflePort());
      app.waitForState(reduceTask1, TaskState.RUNNING);
      TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
      // Write real output so validateOutput() can check it after recovery.
      writeOutput(reduce1Attempt1, conf);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(reduce1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(reduceTask1, TaskState.SUCCEEDED);
      // Crash the first AM.
      app.stop();
      // Second generation with recovery enabled.
      app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false, ++runCount);
      conf = new Configuration();
      conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
      conf.setBoolean("mapred.mapper.new-api", false);
      conf.setBoolean("mapred.reducer.new-api", false);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      it = job.getTasks().values().iterator();
      mapTask1 = it.next();
      reduceTask1 = it.next();
      Task reduceTask2 = it.next();
      // Map and first reduce are recovered; only the second reduce reruns.
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
      Assert.assertEquals(5467, task1Attempt1.getShufflePort());
      app.waitForState(reduceTask1, TaskState.SUCCEEDED);
      app.waitForState(reduceTask2, TaskState.RUNNING);
      TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values().iterator().next();
      app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(reduce2Attempt.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(reduceTask2, TaskState.SUCCEEDED);
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
      validateOutput();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * AM with 3 maps and 0 reduce. AM crashes after the first two tasks finish
     * and recovers completely and succeeds in the second generation.
     * @throws Exception if the test fails
     */
    @Test
    public void testCrashOfMapsOnlyJob() throws Exception {
      int runCount = 0;
      MRApp app = new MRAppWithHistory(3, 0, false, this.getClass().getName(), true, ++runCount);
      Configuration conf = new Configuration();
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      Job job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      // Typed iterator: a raw Iterator's next() returns Object and cannot be
      // assigned to Task without a cast (compile error in the original).
      Iterator<Task> it = job.getTasks().values().iterator();
      Task mapTask1 = it.next();
      Task mapTask2 = it.next();
      Task mapTask3 = it.next();
      app.waitForState(mapTask1, TaskState.RUNNING);
      app.waitForState(mapTask2, TaskState.RUNNING);
      app.waitForState(mapTask3, TaskState.RUNNING);
      TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
      TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
      TaskAttempt task3Attempt = mapTask3.getAttempts().values().iterator().next();
      app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
      app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
      app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
      // Finish the first two maps, then crash the AM.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt.getID(), TaskAttemptEventType.TA_DONE));
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task2Attempt.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.waitForState(mapTask2, TaskState.SUCCEEDED);
      app.stop();
      // Second generation with recovery enabled.
      app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
      conf = new Configuration();
      conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      // Forcing the number of reduces to zero overrides the app's constructor args.
      conf.setInt(MRJobConfig.NUM_REDUCES, 0);
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      it = job.getTasks().values().iterator();
      mapTask1 = it.next();
      mapTask2 = it.next();
      mapTask3 = it.next();
      // First two maps are recovered; only the third map reruns.
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.waitForState(mapTask2, TaskState.SUCCEEDED);
      app.waitForState(mapTask3, TaskState.RUNNING);
      task3Attempt = mapTask3.getAttempts().values().iterator().next();
      app.waitForState(task3Attempt, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask3, TaskState.SUCCEEDED);
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * AM with 2 maps and 1 reduce, crashed and restarted twice. Each
     * generation must recover everything completed by the previous ones:
     * generation 1 finishes map 1, generation 2 recovers map 1 and finishes
     * map 2, generation 3 recovers both maps and finishes the reduce.
     * @throws Exception if the test fails
     */
    @Test
    public void testMultipleCrashes() throws Exception {
      int runCount = 0;
      MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
      Configuration conf = new Configuration();
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.setBoolean("mapred.reducer.new-api", true);
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      Job job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      // Typed iterator: a raw Iterator's next() returns Object and cannot be
      // assigned to Task without a cast (compile error in the original).
      Iterator<Task> it = job.getTasks().values().iterator();
      Task mapTask1 = it.next();
      Task mapTask2 = it.next();
      Task reduceTask = it.next();
      app.waitForState(mapTask1, TaskState.RUNNING);
      app.waitForState(mapTask2, TaskState.RUNNING);
      TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
      TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
      app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
      app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
      Assert.assertEquals("Reduce Task state not correct",
          TaskState.RUNNING, reduceTask.getReport().getTaskState());
      // Finish map 1 and crash the first AM.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.stop();
      // Second generation: recover map 1, finish map 2, crash again.
      app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
      conf = new Configuration();
      conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.setBoolean("mapred.reducer.new-api", true);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      it = job.getTasks().values().iterator();
      mapTask1 = it.next();
      mapTask2 = it.next();
      reduceTask = it.next();
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.waitForState(mapTask2, TaskState.RUNNING);
      task2Attempt = mapTask2.getAttempts().values().iterator().next();
      app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask2, TaskState.SUCCEEDED);
      app.stop();
      // Third generation: recover both maps, finish the reduce.
      app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
      conf = new Configuration();
      conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.setBoolean("mapred.reducer.new-api", true);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      it = job.getTasks().values().iterator();
      mapTask1 = it.next();
      mapTask2 = it.next();
      reduceTask = it.next();
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.waitForState(mapTask2, TaskState.SUCCEEDED);
      app.waitForState(reduceTask, TaskState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt
     * completely disappears because of failed launch, one attempt gets killed
     * and one attempt succeeds. AM crashes after the first task finishes and
     * recovers completely and succeeds in the second generation.
     * @throws Exception if the test fails
     */
    @Test
    public void testCrashed() throws Exception {
      int runCount = 0;
      long am1StartTimeEst = System.currentTimeMillis();
      MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
      Configuration conf = new Configuration();
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.setBoolean("mapred.reducer.new-api", true);
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      Job job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      long jobStartTime = job.getReport().getStartTime();
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      // Typed iterator: a raw Iterator's next() returns Object and cannot be
      // assigned to Task without a cast (compile error in the original).
      Iterator<Task> it = job.getTasks().values().iterator();
      Task mapTask1 = it.next();
      Task mapTask2 = it.next();
      Task reduceTask = it.next();
      app.waitForState(mapTask1, TaskState.RUNNING);
      app.waitForState(mapTask2, TaskState.RUNNING);
      TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
      TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
      app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
      app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
      Assert.assertEquals("Reduce Task state not correct",
          TaskState.RUNNING, reduceTask.getReport().getTaskState());
      // Attempt 1: fail it and wait for the replacement attempt to appear.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt1.getID(), TaskAttemptEventType.TA_FAILMSG));
      app.waitForState(task1Attempt1, TaskAttemptState.FAILED);
      int timeOut = 0;
      while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
        Thread.sleep(2000);
        LOG.info("Waiting for next attempt to start");
      }
      Assert.assertEquals(2, mapTask1.getAttempts().size());
      Iterator<TaskAttempt> itr = mapTask1.getAttempts().values().iterator();
      itr.next();
      TaskAttempt task1Attempt2 = itr.next();
      // Attempt 2: container launch fails.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt2.getID(),
              TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
      app.waitForState(task1Attempt2, TaskAttemptState.FAILED);
      timeOut = 0;
      while (mapTask1.getAttempts().size() != 3 && timeOut++ < 10) {
        Thread.sleep(2000);
        LOG.info("Waiting for next attempt to start");
      }
      Assert.assertEquals(3, mapTask1.getAttempts().size());
      itr = mapTask1.getAttempts().values().iterator();
      itr.next();
      itr.next();
      TaskAttempt task1Attempt3 = itr.next();
      app.waitForState(task1Attempt3, TaskAttemptState.RUNNING);
      // Attempt 3: kill it.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt3.getID(), TaskAttemptEventType.TA_KILL));
      app.waitForState(task1Attempt3, TaskAttemptState.KILLED);
      timeOut = 0;
      while (mapTask1.getAttempts().size() != 4 && timeOut++ < 10) {
        Thread.sleep(2000);
        LOG.info("Waiting for next attempt to start");
      }
      Assert.assertEquals(4, mapTask1.getAttempts().size());
      itr = mapTask1.getAttempts().values().iterator();
      itr.next();
      itr.next();
      itr.next();
      TaskAttempt task1Attempt4 = itr.next();
      app.waitForState(task1Attempt4, TaskAttemptState.RUNNING);
      // Attempt 4: finally succeeds.
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(task1Attempt4.getID(), TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      long task1StartTime = mapTask1.getReport().getStartTime();
      long task1FinishTime = mapTask1.getReport().getFinishTime();
      // Crash the first AM.
      app.stop();
      // Second generation with recovery enabled.
      long am2StartTimeEst = System.currentTimeMillis();
      app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
      conf = new Configuration();
      conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
      conf.setBoolean("mapred.mapper.new-api", true);
      conf.setBoolean("mapred.reducer.new-api", true);
      conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
      conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
      job = app.submit(conf);
      app.waitForState(job, JobState.RUNNING);
      Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size());
      it = job.getTasks().values().iterator();
      mapTask1 = it.next();
      mapTask2 = it.next();
      reduceTask = it.next();
      // Map 1 must be recovered as SUCCEEDED without rerunning.
      app.waitForState(mapTask1, TaskState.SUCCEEDED);
      app.waitForState(mapTask2, TaskState.RUNNING);
      task2Attempt = mapTask2.getAttempts().values().iterator().next();
      app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(mapTask2.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.waitForState(mapTask2, TaskState.SUCCEEDED);
      app.waitForState(reduceTask, TaskState.RUNNING);
      app.getContext().getEventHandler().handle(
          new TaskAttemptEvent(reduceTask.getAttempts().values().iterator().next().getID(),
              TaskAttemptEventType.TA_DONE));
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
      // Recovered job/task times must match the first generation exactly.
      Assert.assertEquals("Job Start time not correct",
          jobStartTime, job.getReport().getStartTime());
      Assert.assertEquals("Task Start time not correct",
          task1StartTime, mapTask1.getReport().getStartTime());
      Assert.assertEquals("Task Finish time not correct",
          task1FinishTime, mapTask1.getReport().getFinishTime());
      Assert.assertEquals(2, job.getAMInfos().size());
      int attemptNum = 1;
      for (AMInfo amInfo : job.getAMInfos()) {
        Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId().getAttemptId());
        Assert.assertEquals(amInfo.getAppAttemptId(),
            amInfo.getContainerId().getApplicationAttemptId());
        Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
        Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
        Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
      }
      // Each AM's reported start time must fall inside the window observed here.
      long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
      long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
      Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst
          && am1StartTimeReal <= am2StartTimeEst);
      Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst
          && am2StartTimeReal <= System.currentTimeMillis());
    }

    Class: org.apache.hadoop.mapreduce.v2.app.TestStagingCleanup

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * When the AM is on its last retry and the job shuts down normally, the
     * staging directory must be deleted exactly once.
     * @throws IOException if mock setup fails
     */
    @Test
    public void testDeletionofStaging() throws IOException {
      conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
      // Mock filesystem that reports the staging dir as present and deletable.
      fs = mock(FileSystem.class);
      when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
      String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
      Path stagingPath = MRApps.getStagingAreaDir(conf, currentUser);
      when(fs.exists(stagingPath)).thenReturn(true);
      ApplicationId applicationId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
      JobId jobid = recordFactory.newRecordInstance(JobId.class);
      jobid.setAppId(applicationId);
      ContainerAllocator allocator = mock(ContainerAllocator.class);
      // Sanity check: this attempt must not be the only allowed one by default.
      Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
      MRAppMaster master = new TestMRApp(appAttemptId, allocator,
          JobStateInternal.RUNNING, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
      master.init(conf);
      master.start();
      master.shutDownJob();
      // Running at max attempts counts as the last AM retry -> staging is cleaned.
      Assert.assertEquals(true, ((TestMRApp) master).getTestIsLastAMRetry());
      verify(fs).delete(stagingJobPath, true);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * On shutdown the container allocator must stop (order 1) before the
     * staging directory is cleaned up (order 2).
     * @throws Exception if the test fails
     */
    @Test(timeout = 20000)
    public void testStagingCleanupOrder() throws Exception {
      MRAppTestCleanup app = new MRAppTestCleanup(1, 1, true, this.getClass().getName(), true);
      JobImpl job = (JobImpl) app.submit(new Configuration());
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
      // Poll until both shutdown steps were recorded (or 20 s elapsed).
      int remainingMs = 20 * 1000;
      while (remainingMs > 0 && app.numStops < 2) {
        Thread.sleep(100);
        remainingMs -= 100;
      }
      // Allocator stop was recorded first, staging cleanup second.
      Assert.assertEquals(1, app.ContainerAllocatorStopped);
      Assert.assertEquals(2, app.stagingDirCleanedup);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * When the job ends in REBOOT (and this is not the last AM retry), the
     * staging directory must NOT be deleted, so the next attempt can recover.
     * @throws IOException if mock setup fails
     */
    @Test(timeout = 30000)
    public void testNoDeletionofStagingOnReboot() throws IOException {
      conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
      // Mock filesystem that reports the staging dir as present and deletable.
      fs = mock(FileSystem.class);
      when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
      String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
      Path stagingPath = MRApps.getStagingAreaDir(conf, currentUser);
      when(fs.exists(stagingPath)).thenReturn(true);
      ApplicationId applicationId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
      ContainerAllocator allocator = mock(ContainerAllocator.class);
      // Sanity check: more retries remain after this attempt.
      Assert.assertTrue(MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS > 1);
      MRAppMaster master = new TestMRApp(appAttemptId, allocator,
          JobStateInternal.REBOOT, MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
      master.init(conf);
      master.start();
      master.shutDownJob();
      // Not the last retry -> staging must be left intact.
      Assert.assertEquals(false, ((TestMRApp) master).getTestIsLastAMRetry());
      verify(fs, times(0)).delete(stagingJobPath, true);
    }

    Class: org.apache.hadoop.mapreduce.v2.app.commit.TestCommitterEventHandler

    InternalCallVerifier EqualityVerifier 
    @Test public void testCommitWindow() throws Exception { Configuration conf=new Configuration(); conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir); AsyncDispatcher dispatcher=new AsyncDispatcher(); dispatcher.init(conf); dispatcher.start(); TestingJobEventHandler jeh=new TestingJobEventHandler(); dispatcher.register(JobEventType.class,jeh); SystemClock clock=new SystemClock(); AppContext appContext=mock(AppContext.class); ApplicationAttemptId attemptid=ConverterUtils.toApplicationAttemptId("appattempt_1234567890000_0001_0"); when(appContext.getApplicationID()).thenReturn(attemptid.getApplicationId()); when(appContext.getApplicationAttemptId()).thenReturn(attemptid); when(appContext.getEventHandler()).thenReturn(dispatcher.getEventHandler()); when(appContext.getClock()).thenReturn(clock); OutputCommitter committer=mock(OutputCommitter.class); TestingRMHeartbeatHandler rmhh=new TestingRMHeartbeatHandler(); CommitterEventHandler ceh=new CommitterEventHandler(appContext,committer,rmhh); ceh.init(conf); ceh.start(); ceh.handle(new CommitterJobCommitEvent(null,null)); long timeToWaitMs=5000; while (rmhh.getNumCallbacks() != 1 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs-=10; } Assert.assertEquals("committer did not register a heartbeat callback",1,rmhh.getNumCallbacks()); verify(committer,never()).commitJob(any(JobContext.class)); Assert.assertEquals("committer should not have committed",0,jeh.numCommitCompletedEvents); rmhh.setLastHeartbeatTime(clock.getTime()); timeToWaitMs=5000; while (jeh.numCommitCompletedEvents != 1 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs-=10; } Assert.assertEquals("committer did not complete commit after RM hearbeat",1,jeh.numCommitCompletedEvents); verify(committer,times(1)).commitJob(any(JobContext.class)); cleanup(); ceh.handle(new CommitterJobCommitEvent(null,null)); timeToWaitMs=5000; while (jeh.numCommitCompletedEvents != 2 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs-=10; } Assert.assertEquals("committer 
did not commit",2,jeh.numCommitCompletedEvents); verify(committer,times(2)).commitJob(any(JobContext.class)); ceh.stop(); dispatcher.stop(); }

    Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A JOB_AM_REBOOT received while the job is COMMITTING on the last AM
     * retry moves the internal state to REBOOT; the external state stays
     * RUNNING until unregistration succeeds, then reports ERROR.
     * @throws Exception if the test fails
     */
    @Test(timeout = 20000)
    public void testRebootedDuringCommit() throws Exception {
      Configuration configuration = new Configuration();
      configuration.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
      configuration.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 2);
      AsyncDispatcher eventDispatcher = new AsyncDispatcher();
      eventDispatcher.init(configuration);
      eventDispatcher.start();
      // Committer blocks on the barrier so the job stays in COMMITTING.
      CyclicBarrier barrier = new CyclicBarrier(2);
      OutputCommitter blockingCommitter = new WaitingOutputCommitter(barrier, true);
      CommitterEventHandler handler =
          createCommitterEventHandler(eventDispatcher, blockingCommitter);
      handler.init(configuration);
      handler.start();
      AppContext context = mock(AppContext.class);
      when(context.isLastAMRetry()).thenReturn(true);
      when(context.hasSuccessfullyUnregistered()).thenReturn(false);
      JobImpl job = createRunningStubbedJob(configuration, eventDispatcher, 2, context);
      completeJobTasks(job);
      assertJobState(job, JobStateInternal.COMMITTING);
      // Release the committer, then reboot the AM mid-commit.
      barrier.await();
      job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT));
      assertJobState(job, JobStateInternal.REBOOT);
      // Externally still RUNNING until unregistration completes...
      Assert.assertEquals(JobState.RUNNING, job.getState());
      when(context.hasSuccessfullyUnregistered()).thenReturn(true);
      // ...then the last-retry reboot surfaces as ERROR.
      Assert.assertEquals(JobState.ERROR, job.getState());
      eventDispatcher.stop();
      handler.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A JOB_AM_REBOOT received while the job is stuck in SETUP (the committer
     * below blocks in setupJob until interrupted) moves the internal state to
     * REBOOT while the externally visible state stays RUNNING, since this is
     * not the last AM retry.
     * @throws Exception if the test fails
     */
    @Test(timeout=20000) public void testRebootedDuringSetup() throws Exception {
      Configuration conf=new Configuration();
      conf.set(MRJobConfig.MR_AM_STAGING_DIR,stagingDir);
      AsyncDispatcher dispatcher=new AsyncDispatcher();
      dispatcher.init(conf);
      dispatcher.start();
      // Committer whose setupJob() parks on wait() until the worker thread is
      // interrupted, pinning the job in the SETUP state.
      OutputCommitter committer=new StubbedOutputCommitter(){
        @Override public synchronized void setupJob( JobContext jobContext) throws IOException {
          while (!Thread.interrupted()) {
            try {
              wait();
            }
 catch (            InterruptedException e) {
              // loop re-checks the interrupt flag; swallowing here is intentional
            }
          }
        }
      }
;
      CommitterEventHandler commitHandler=createCommitterEventHandler(dispatcher,committer);
      commitHandler.init(conf);
      commitHandler.start();
      AppContext mockContext=mock(AppContext.class);
      // Not the last retry, so REBOOT should not surface as a terminal state.
      when(mockContext.isLastAMRetry()).thenReturn(false);
      JobImpl job=createStubbedJob(conf,dispatcher,2,mockContext);
      JobId jobId=job.getID();
      job.handle(new JobEvent(jobId,JobEventType.JOB_INIT));
      assertJobState(job,JobStateInternal.INITED);
      job.handle(new JobStartEvent(jobId));
      assertJobState(job,JobStateInternal.SETUP);
      // Reboot while setup is blocked.
      job.handle(new JobEvent(job.getID(),JobEventType.JOB_AM_REBOOT));
      assertJobState(job,JobStateInternal.REBOOT);
      // External state remains RUNNING for a non-final attempt.
      Assert.assertEquals(JobState.RUNNING,job.getState());
      dispatcher.stop();
      commitHandler.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Once a job has failed (setupJob throws), subsequent task/attempt events
     * must leave it in FAILED; the external state stays RUNNING until
     * unregistration succeeds and then reports FAILED.
     * @throws IOException if mock setup fails
     */
    @Test
    public void testTransitionsAtFailed() throws IOException {
      Configuration configuration = new Configuration();
      AsyncDispatcher eventDispatcher = new AsyncDispatcher();
      eventDispatcher.init(configuration);
      eventDispatcher.start();
      // Committer whose setup always throws, driving the job straight to FAILED.
      OutputCommitter failingCommitter = mock(OutputCommitter.class);
      doThrow(new IOException("forcefail"))
          .when(failingCommitter).setupJob(any(JobContext.class));
      CommitterEventHandler handler =
          createCommitterEventHandler(eventDispatcher, failingCommitter);
      handler.init(configuration);
      handler.start();
      AppContext context = mock(AppContext.class);
      when(context.hasSuccessfullyUnregistered()).thenReturn(false);
      JobImpl job = createStubbedJob(configuration, eventDispatcher, 2, context);
      JobId jobId = job.getID();
      job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
      assertJobState(job, JobStateInternal.INITED);
      job.handle(new JobStartEvent(jobId));
      assertJobState(job, JobStateInternal.FAILED);
      // None of these events may move the job out of FAILED.
      JobEventType[] ignoredAtFailed = {
          JobEventType.JOB_TASK_COMPLETED,
          JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
          JobEventType.JOB_MAP_TASK_RESCHEDULED,
          JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE
      };
      for (JobEventType eventType : ignoredAtFailed) {
        job.handle(new JobEvent(jobId, eventType));
        assertJobState(job, JobStateInternal.FAILED);
      }
      // Externally RUNNING until unregistration completes, then FAILED.
      Assert.assertEquals(JobState.RUNNING, job.getState());
      when(context.hasSuccessfullyUnregistered()).thenReturn(true);
      Assert.assertEquals(JobState.FAILED, job.getState());
      eventDispatcher.stop();
      handler.stop();
    }

    Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestMapReduceChildJVM

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs a 1-map job and checks the exact child-JVM command line built for
     * the task, plus the environment defaults (HADOOP_ROOT_LOGGER and
     * HADOOP_CLIENT_OPTS) passed to the child.
     * NOTE(review): the expected string is position-sensitive; do not reflow it.
     * @throws Exception if the test fails
     */
    @Test(timeout=30000) public void testCommandLine() throws Exception {
      MyMRApp app=new MyMRApp(1,0,true,this.getClass().getName(),true);
      Configuration conf=new Configuration();
      // Cross-platform submission uses the {{VAR}} placeholder form of env vars.
      conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true);
      Job job=app.submit(conf);
      app.waitForState(job,JobState.SUCCEEDED);
      app.verifyCompleted();
      // Full expected child command line, token for token.
      Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java"+ " -Djava.net.preferIPv4Stack=true"+ " -Dhadoop.metrics.log.level=WARN"+ " -Xmx200m -Djava.io.tmpdir="+ MRApps.crossPlatformify("PWD")+ "/tmp"+ " -Dlog4j.configuration=container-log4j.properties"+ " -Dyarn.app.container.log.dir="+ " -Dyarn.app.container.log.filesize=0"+ " -Dhadoop.root.logger=INFO,CLA"+ " org.apache.hadoop.mapred.YarnChild 127.0.0.1"+ " 54321"+ " attempt_0_0000_m_000000_0"+ " 0"+ " 1>/stdout"+ " 2>/stderr ]",app.myCommandLine);
      // Default child environment: INFO,console logger and empty client opts.
      Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
      Assert.assertEquals("INFO,console",app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
      Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
      Assert.assertEquals("",app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that task environment settings flow into the child JVM env:
     * HADOOP_CLIENT_OPTS from mapred map env, the root logger level from the
     * configured map log level, and that an explicit HADOOP_ROOT_LOGGER set in
     * the map env overrides the computed value.
     */
    @Test
    public void testEnvironmentVariables() throws Exception {
      MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
      Configuration conf = new Configuration();
      conf.set(JobConf.MAPRED_MAP_TASK_ENV, "HADOOP_CLIENT_OPTS=test");
      conf.setStrings(MRJobConfig.MAP_LOG_LEVEL, "WARN");
      conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, false);
      Job job = app.submit(conf);
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
      Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",
          app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
      // "WARN" comes from MAP_LOG_LEVEL set above.
      Assert.assertEquals("WARN,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
      Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job",
          app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
      Assert.assertEquals("test", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));

      // Second run: an explicit HADOOP_ROOT_LOGGER in the task env must win.
      app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
      conf = new Configuration();
      conf.set(JobConf.MAPRED_MAP_TASK_ENV, "HADOOP_ROOT_LOGGER=trace");
      job = app.submit(conf);
      app.waitForState(job, JobState.SUCCEEDED);
      app.verifyCompleted();
      Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job",
          app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
      Assert.assertEquals("trace", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
    }

    Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttempt

    BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * When a split lists a raw IP among its locations, RequestContainerTransition
     * must resolve it to a hostname before issuing the container request; the
     * requested host set must then match the resolved names exactly.
     *
     * Fix: replaced raw {@code ArgumentCaptor} and raw {@code Map}/{@code HashMap}
     * with properly parameterized types (no behavioral change).
     */
    @Test
    public void testHostResolveAttempt() throws Exception {
      TaskAttemptImpl.RequestContainerTransition rct =
          new TaskAttemptImpl.RequestContainerTransition(false);
      EventHandler eventHandler = mock(EventHandler.class);
      String[] hosts = new String[3];
      hosts[0] = "192.168.1.1";
      hosts[1] = "host2";
      hosts[2] = "host3";
      TaskSplitMetaInfo splitInfo = new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
      TaskAttemptImpl mockTaskAttempt = createMapTaskAttemptImplForTest(eventHandler, splitInfo);
      TaskAttemptImpl spyTa = spy(mockTaskAttempt);
      // The IP-form location resolves to "host1".
      when(spyTa.resolveHost(hosts[0])).thenReturn("host1");
      spyTa.dataLocalHosts = spyTa.resolveHosts(splitInfo.getLocations());

      TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);
      rct.transition(spyTa, mockTAEvent);
      verify(spyTa).resolveHost(hosts[0]);

      // Typed captor instead of the raw ArgumentCaptor type.
      ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
      verify(eventHandler, times(2)).handle(arg.capture());
      if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
        Assert.fail("Second Event not of type ContainerRequestEvent");
      }

      // Every requested host must be one of the resolved names; removing each
      // requested host must empty the expected set.
      Map<String, Boolean> expected = new HashMap<String, Boolean>();
      expected.put("host1", true);
      expected.put("host2", true);
      expected.put("host3", true);
      ContainerRequestEvent cre = (ContainerRequestEvent) arg.getAllValues().get(1);
      String[] requestedHosts = cre.getHosts();
      for (String h : requestedHosts) {
        expected.remove(h);
      }
      assertEquals(0, expected.size());
    }

    BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A container request for a map split whose three hosts all live in one
     * rack must request exactly one rack.
     *
     * Fix: replaced the raw {@code ArgumentCaptor} with a parameterized
     * {@code ArgumentCaptor<Event>} (no behavioral change).
     */
    @Test
    public void testSingleRackRequest() throws Exception {
      TaskAttemptImpl.RequestContainerTransition rct =
          new TaskAttemptImpl.RequestContainerTransition(false);
      EventHandler eventHandler = mock(EventHandler.class);
      String[] hosts = new String[3];
      hosts[0] = "host1";
      hosts[1] = "host2";
      hosts[2] = "host3";
      TaskSplitMetaInfo splitInfo = new TaskSplitMetaInfo(hosts, 0, 128 * 1024 * 1024l);
      TaskAttemptImpl mockTaskAttempt = createMapTaskAttemptImplForTest(eventHandler, splitInfo);
      TaskAttemptEvent mockTAEvent = mock(TaskAttemptEvent.class);

      rct.transition(mockTaskAttempt, mockTAEvent);

      // Typed captor instead of the raw ArgumentCaptor type.
      ArgumentCaptor<Event> arg = ArgumentCaptor.forClass(Event.class);
      verify(eventHandler, times(2)).handle(arg.capture());
      if (!(arg.getAllValues().get(1) instanceof ContainerRequestEvent)) {
        Assert.fail("Second Event not of type ContainerRequestEvent");
      }
      ContainerRequestEvent cre = (ContainerRequestEvent) arg.getAllValues().get(1);
      String[] requestedRacks = cre.getRacks();
      // All three hosts share a rack, so exactly one rack is requested.
      assertEquals(1, requestedRacks.length);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that when a succeeded map attempt later gets
     * TA_TOO_MANY_FETCH_FAILURE it transitions to FAILED but keeps the finish
     * time recorded at the moment it originally succeeded.
     *
     * Fix: the state assertions passed arguments as (message, actual, expected);
     * JUnit's contract is (message, expected, actual), so failures printed
     * reversed expected/actual values. Argument order corrected.
     */
    @Test
    public void testFetchFailureAttemptFinishTime() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(1, 2);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      Path jobFile = mock(Path.class);

      MockEventHandler eventHandler = new MockEventHandler();
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      // Stubbed local FS keeps the attempt off the real file system.
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

      TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
      when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

      AppContext appCtx = mock(AppContext.class);
      ClusterInfo clusterInfo = mock(ClusterInfo.class);
      when(appCtx.getClusterInfo()).thenReturn(clusterInfo);

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, splits, jobConf, taListener, mock(Token.class),
          new Credentials(), new SystemClock(), appCtx);

      NodeId nid = NodeId.newInstance("127.0.0.1", 0);
      ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
      Container container = mock(Container.class);
      when(container.getId()).thenReturn(contId);
      when(container.getNodeId()).thenReturn(nid);
      when(container.getNodeHttpAddress()).thenReturn("localhost:0");

      // Drive the attempt: schedule -> assigned -> launched -> done -> cleaned.
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
      taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
      taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));

      assertEquals("Task attempt is not in succeeded state",
          TaskAttemptState.SUCCEEDED, taImpl.getState());
      assertTrue("Task Attempt finish time is not greater than 0",
          taImpl.getFinishTime() > 0);

      Long finishTime = taImpl.getFinishTime();
      // Let the clock advance so a wrongly re-recorded finish time would differ.
      Thread.sleep(5);
      taImpl.handle(new TaskAttemptEvent(attemptId,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));

      assertEquals("Task attempt is not in Too Many Fetch Failure state",
          TaskAttemptState.FAILED, taImpl.getState());
      assertEquals("After TA_TOO_MANY_FETCH_FAILURE,"
          + " Task attempt finish time is not the same ",
          finishTime, Long.valueOf(taImpl.getFinishTime()));
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * After a succeeded attempt is failed by too many fetch failures, a second
     * TA_TOO_MANY_FETCH_FAILURE event must be handled without an internal
     * error and leave the attempt in FAILED.
     *
     * Fix: state assertions used (message, actual, expected); corrected to
     * JUnit's (message, expected, actual) order so failures report correctly.
     */
    @Test
    public void testDoubleTooManyFetchFailure() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(1, 2);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      Path jobFile = mock(Path.class);

      MockEventHandler eventHandler = new MockEventHandler();
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      // Stubbed local FS keeps the attempt off the real file system.
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

      TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
      when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

      AppContext appCtx = mock(AppContext.class);
      ClusterInfo clusterInfo = mock(ClusterInfo.class);
      Resource resource = mock(Resource.class);
      when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
      when(resource.getMemory()).thenReturn(1024);

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, splits, jobConf, taListener, new Token(),
          new Credentials(), new SystemClock(), appCtx);

      NodeId nid = NodeId.newInstance("127.0.0.1", 0);
      ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
      Container container = mock(Container.class);
      when(container.getId()).thenReturn(contId);
      when(container.getNodeId()).thenReturn(nid);
      when(container.getNodeHttpAddress()).thenReturn("localhost:0");

      // Drive the attempt: schedule -> assigned -> launched -> done -> cleaned.
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
      taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
      taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
      assertEquals("Task attempt is not in succeeded state",
          TaskAttemptState.SUCCEEDED, taImpl.getState());

      taImpl.handle(new TaskAttemptEvent(attemptId,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
      assertEquals("Task attempt is not in FAILED state",
          TaskAttemptState.FAILED, taImpl.getState());

      // A duplicate fetch-failure event must be a no-op, not an internal error.
      taImpl.handle(new TaskAttemptEvent(attemptId,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
      assertEquals("Task attempt is not in FAILED state, still",
          TaskAttemptState.FAILED, taImpl.getState());
      assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
          eventHandler.internalError);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A container-launch-failed event arriving while the attempt is already
     * being killed must not raise an internal error, and the attempt must
     * report NODE_LOCAL locality (the container was placed on a data-local
     * node).
     */
    @Test
    public void testLaunchFailedWhileKilling() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(1, 2);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      Path jobFile = mock(Path.class);

      MockEventHandler eventHandler = new MockEventHandler();
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      // Stubbed local FS keeps the attempt off the real file system.
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

      TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
      // Split location matches the container node, so locality is NODE_LOCAL.
      when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, splits, jobConf, taListener, new Token(),
          new Credentials(), new SystemClock(), null);

      NodeId nid = NodeId.newInstance("127.0.0.1", 0);
      ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
      Container container = mock(Container.class);
      when(container.getId()).thenReturn(contId);
      when(container.getNodeId()).thenReturn(nid);

      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
      taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
      // Kill before launch completes, then clean up, then report launch failure.
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));

      assertFalse(eventHandler.internalError);
      assertEquals("Task attempt is not assigned on the local node",
          Locality.NODE_LOCAL, taImpl.getLocality());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A TA_TOO_MANY_FETCH_FAILURE event arriving after the attempt was killed
     * must be ignored: the attempt stays KILLED and no internal error occurs.
     *
     * Fix: state assertions used (message, actual, expected); corrected to
     * JUnit's (message, expected, actual) order so failures report correctly.
     */
    @Test
    public void testTooManyFetchFailureAfterKill() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(1, 2);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      Path jobFile = mock(Path.class);

      MockEventHandler eventHandler = new MockEventHandler();
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      // Stubbed local FS keeps the attempt off the real file system.
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

      TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
      when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

      AppContext appCtx = mock(AppContext.class);
      ClusterInfo clusterInfo = mock(ClusterInfo.class);
      Resource resource = mock(Resource.class);
      when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
      when(resource.getMemory()).thenReturn(1024);

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, splits, jobConf, taListener, mock(Token.class),
          new Credentials(), new SystemClock(), appCtx);

      NodeId nid = NodeId.newInstance("127.0.0.1", 0);
      ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
      Container container = mock(Container.class);
      when(container.getId()).thenReturn(contId);
      when(container.getNodeId()).thenReturn(nid);
      when(container.getNodeHttpAddress()).thenReturn("localhost:0");

      // Drive the attempt: schedule -> assigned -> launched -> done -> cleaned.
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
      taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
      taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_DONE));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
      assertEquals("Task attempt is not in succeeded state",
          TaskAttemptState.SUCCEEDED, taImpl.getState());

      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
      assertEquals("Task attempt is not in KILLED state",
          TaskAttemptState.KILLED, taImpl.getState());

      // A late fetch-failure report must not resurrect or fail a killed attempt.
      taImpl.handle(new TaskAttemptEvent(attemptId,
          TaskAttemptEventType.TA_TOO_MANY_FETCH_FAILURE));
      assertEquals("Task attempt is not in KILLED state, still",
          TaskAttemptState.KILLED, taImpl.getState());
      assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
          eventHandler.internalError);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A TA_CONTAINER_CLEANED event during COMMIT_PENDING must be handled
     * without an internal error; with no split locations the attempt's
     * locality is OFF_SWITCH.
     *
     * Fix: the COMMIT_PENDING assertion used (message, actual, expected);
     * corrected to JUnit's (message, expected, actual) order.
     */
    @Test
    public void testContainerCleanedWhileCommitting() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(1, 2);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      Path jobFile = mock(Path.class);

      MockEventHandler eventHandler = new MockEventHandler();
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      // Stubbed local FS keeps the attempt off the real file system.
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

      TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
      // No split locations at all -> OFF_SWITCH locality expected below.
      when(splits.getLocations()).thenReturn(new String[] {});

      AppContext appCtx = mock(AppContext.class);
      ClusterInfo clusterInfo = mock(ClusterInfo.class);
      Resource resource = mock(Resource.class);
      when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
      when(resource.getMemory()).thenReturn(1024);

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, splits, jobConf, taListener, new Token(),
          new Credentials(), new SystemClock(), appCtx);

      NodeId nid = NodeId.newInstance("127.0.0.1", 0);
      ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
      Container container = mock(Container.class);
      when(container.getId()).thenReturn(contId);
      when(container.getNodeId()).thenReturn(nid);
      when(container.getNodeHttpAddress()).thenReturn("localhost:0");

      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
      taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
      taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId, 0));
      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_COMMIT_PENDING));
      assertEquals("Task attempt is not in commit pending state",
          TaskAttemptState.COMMIT_PENDING, taImpl.getState());

      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_CONTAINER_CLEANED));
      assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",
          eventHandler.internalError);
      assertEquals("Task attempt is assigned locally",
          Locality.OFF_SWITCH, taImpl.getLocality());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testContainerKillWhileCommitPending() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); AppContext appCtx=mock(AppContext.class); ClusterInfo clusterInfo=mock(ClusterInfo.class); Resource resource=mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); when(resource.getMemory()).thenReturn(1024); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx); NodeId nid=NodeId.newInstance("127.0.0.2",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0)); assertEquals("Task attempt is not in running 
state",taImpl.getState(),TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_COMMIT_PENDING)); assertEquals("Task should be in COMMIT_PENDING state",TaskAttemptStateInternal.COMMIT_PENDING,taImpl.getInternalState()); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL)); assertFalse("InternalError occurred trying to handle TA_KILL",eventHandler.internalError); assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A TA_KILL while the attempt is merely ASSIGNED (container granted but
     * not launched) must move it into KILL_CONTAINER_CLEANUP.
     *
     * Fixes: the ASSIGNED assertion used (message, actual, expected) —
     * corrected to JUnit's (message, expected, actual) order — and the message
     * typo "assinged" is corrected to "assigned".
     */
    @Test
    public void testContainerKillAfterAssigned() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(1, 2);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
      Path jobFile = mock(Path.class);

      MockEventHandler eventHandler = new MockEventHandler();
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      // Stubbed local FS keeps the attempt off the real file system.
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, "10");

      TaskSplitMetaInfo splits = mock(TaskSplitMetaInfo.class);
      when(splits.getLocations()).thenReturn(new String[] {"127.0.0.1"});

      AppContext appCtx = mock(AppContext.class);
      ClusterInfo clusterInfo = mock(ClusterInfo.class);
      Resource resource = mock(Resource.class);
      when(appCtx.getClusterInfo()).thenReturn(clusterInfo);
      when(resource.getMemory()).thenReturn(1024);

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, splits, jobConf, taListener, new Token(),
          new Credentials(), new SystemClock(), appCtx);

      NodeId nid = NodeId.newInstance("127.0.0.2", 0);
      ContainerId contId = ContainerId.newInstance(appAttemptId, 3);
      Container container = mock(Container.class);
      when(container.getId()).thenReturn(contId);
      when(container.getNodeId()).thenReturn(nid);
      when(container.getNodeHttpAddress()).thenReturn("localhost:0");

      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_SCHEDULE));
      taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId, container, mock(Map.class)));
      assertEquals("Task attempt is not in assigned state",
          TaskAttemptStateInternal.ASSIGNED, taImpl.getInternalState());

      taImpl.handle(new TaskAttemptEvent(attemptId, TaskAttemptEventType.TA_KILL));
      assertEquals("Task should be in KILLED state",
          TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP, taImpl.getInternalState());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testContainerCleanedWhileRunning() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); AppContext appCtx=mock(AppContext.class); ClusterInfo clusterInfo=mock(ClusterInfo.class); Resource resource=mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); when(resource.getMemory()).thenReturn(1024); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx); NodeId nid=NodeId.newInstance("127.0.0.2",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0)); assertEquals("Task attempt is not in running 
state",taImpl.getState(),TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_CONTAINER_CLEANED)); assertFalse("InternalError occurred trying to handle TA_CONTAINER_CLEANED",eventHandler.internalError); assertEquals("Task attempt is not assigned on the local rack",Locality.RACK_LOCAL,taImpl.getLocality()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testContainerKillWhileRunning() throws Exception { ApplicationId appId=ApplicationId.newInstance(1,2); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,0); JobId jobId=MRBuilderUtils.newJobId(appId,1); TaskId taskId=MRBuilderUtils.newTaskId(jobId,1,TaskType.MAP); TaskAttemptId attemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); Path jobFile=mock(Path.class); MockEventHandler eventHandler=new MockEventHandler(); TaskAttemptListener taListener=mock(TaskAttemptListener.class); when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost",0)); JobConf jobConf=new JobConf(); jobConf.setClass("fs.file.impl",StubbedFS.class,FileSystem.class); jobConf.setBoolean("fs.file.impl.disable.cache",true); jobConf.set(JobConf.MAPRED_MAP_TASK_ENV,""); jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID,"10"); TaskSplitMetaInfo splits=mock(TaskSplitMetaInfo.class); when(splits.getLocations()).thenReturn(new String[]{"127.0.0.1"}); AppContext appCtx=mock(AppContext.class); ClusterInfo clusterInfo=mock(ClusterInfo.class); Resource resource=mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); when(resource.getMemory()).thenReturn(1024); TaskAttemptImpl taImpl=new MapTaskAttemptImpl(taskId,1,eventHandler,jobFile,1,splits,jobConf,taListener,new Token(),new Credentials(),new SystemClock(),appCtx); NodeId nid=NodeId.newInstance("127.0.0.2",0); ContainerId contId=ContainerId.newInstance(appAttemptId,3); Container container=mock(Container.class); when(container.getId()).thenReturn(contId); when(container.getNodeId()).thenReturn(nid); when(container.getNodeHttpAddress()).thenReturn("localhost:0"); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_SCHEDULE)); taImpl.handle(new TaskAttemptContainerAssignedEvent(attemptId,container,mock(Map.class))); taImpl.handle(new TaskAttemptContainerLaunchedEvent(attemptId,0)); assertEquals("Task attempt is not in running 
state",taImpl.getState(),TaskAttemptState.RUNNING); taImpl.handle(new TaskAttemptEvent(attemptId,TaskAttemptEventType.TA_KILL)); assertFalse("InternalError occurred trying to handle TA_KILL",eventHandler.internalError); assertEquals("Task should be in KILLED state",TaskAttemptStateInternal.KILL_CONTAINER_CLEANUP,taImpl.getInternalState()); }

    Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskAttemptContainerRequest

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The container launch context built for a task attempt must carry the job
     * ACLs, every token from the job credentials, and the job's secret keys —
     * i.e. the serialized credentials survive the round trip into the context.
     */
    @Test
    public void testAttemptContainerRequest() throws Exception {
      final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
      final byte[] SECRET_KEY = ("secretkey").getBytes();
      Map acls = new HashMap(1);
      acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
      ApplicationId appId = ApplicationId.newInstance(1, 1);
      JobId jobId = MRBuilderUtils.newJobId(appId, 1);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
      Path jobFile = mock(Path.class);

      EventHandler eventHandler = mock(EventHandler.class);
      TaskAttemptListener taListener = mock(TaskAttemptListener.class);
      when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

      JobConf jobConf = new JobConf();
      jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
      jobConf.setBoolean("fs.file.impl.disable.cache", true);
      jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");
      // Kerberos auth so the secure credential-propagation path is exercised.
      jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
      UserGroupInformation.setConfiguration(jobConf);

      Credentials credentials = new Credentials();
      credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
      Token jobToken = new Token(("tokenid").getBytes(), ("tokenpw").getBytes(),
          new Text("tokenkind"), new Text("tokenservice"));

      TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler,
          jobFile, 1, mock(TaskSplitMetaInfo.class), jobConf, taListener,
          jobToken, credentials, new SystemClock(), null);
      jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

      ContainerLaunchContext launchCtx = TaskAttemptImpl.createContainerLaunchContext(
          acls, jobConf, jobToken, taImpl.createRemoteTask(),
          TypeConverter.fromYarn(jobId), mock(WrappedJvmID.class), taListener,
          credentials);

      Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());

      // Deserialize the credentials that were packed into the launch context.
      Credentials launchCredentials = new Credentials();
      DataInputByteBuffer dibb = new DataInputByteBuffer();
      dibb.reset(launchCtx.getTokens());
      launchCredentials.readTokenStorageStream(dibb);

      // Every token in the job credentials must survive the round trip intact.
      for (Token token : credentials.getAllTokens()) {
        Token launchToken = launchCredentials.getToken(token.getService());
        Assert.assertNotNull("Token " + token.getService() + " is missing", launchToken);
        Assert.assertEquals("Token " + token.getService() + " mismatch", token, launchToken);
      }

      Assert.assertNotNull("Secret key missing",
          launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
      Assert.assertTrue("Secret key mismatch",
          Arrays.equals(SECRET_KEY, launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
    }

    Class: org.apache.hadoop.mapreduce.v2.app.job.impl.TestTaskImpl

    EqualityVerifier 
    /**
     * After the original attempt failed and a speculative attempt ran, a
     * failure reported against the speculative attempt must put the task back
     * into the scheduled state and spawn a third attempt.
     */
    @Test
    public void testSpeculativeMapFailedFetchFailure() {
      mockTask = createMockTask(TaskType.MAP);
      runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_FAILED);
      assertEquals(2, taskAttempts.size());

      // Report the second (speculative) attempt as failed.
      TaskAttemptId speculativeAttempt = taskAttempts.get(1).getAttemptId();
      mockTask.handle(
          new TaskTAttemptEvent(speculativeAttempt, TaskEventType.T_ATTEMPT_FAILED));

      // The task reschedules and a replacement attempt is created.
      assertTaskScheduledState();
      assertEquals(3, taskAttempts.size());
    }

    EqualityVerifier 
    /**
     * After the original attempt was killed and a speculative attempt ran, a
     * failure reported against the speculative attempt must put the task back
     * into the scheduled state and spawn a third attempt.
     */
    @Test
    public void testSpeculativeMapFetchFailure() {
      mockTask = createMockTask(TaskType.MAP);
      runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_KILLED);
      assertEquals(2, taskAttempts.size());

      // Report the second (speculative) attempt as failed.
      TaskAttemptId speculativeAttempt = taskAttempts.get(1).getAttemptId();
      mockTask.handle(
          new TaskTAttemptEvent(speculativeAttempt, TaskEventType.T_ATTEMPT_FAILED));

      // The task reschedules and a replacement attempt is created.
      assertTaskScheduledState();
      assertEquals(3, taskAttempts.size());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * When a speculative attempt finishes first, the task's counters must come
     * from the successful (speculative) attempt rather than the original one.
     */
    @Test
    public void testCountersWithSpeculation() {
      // Max attempts forced to 1 so no retry machinery interferes.
      mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
          remoteJobConfFile, conf, taskAttemptListener, jobToken, credentials,
          clock, startCount, metrics, appContext, TaskType.MAP) {
        @Override
        protected int getMaxAttempts() {
          return 1;
        }
      };
      TaskId taskId = getNewTaskID();
      scheduleTaskAttempt(taskId);
      launchTaskAttempt(getLastAttempt().getAttemptId());
      updateLastAttemptState(TaskAttemptState.RUNNING);
      MockTaskAttemptImpl baseAttempt = getLastAttempt();

      // Start a speculative attempt alongside the running one.
      mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
          TaskEventType.T_ADD_SPEC_ATTEMPT));
      launchTaskAttempt(getLastAttempt().getAttemptId());
      updateLastAttemptState(TaskAttemptState.RUNNING);
      MockTaskAttemptImpl specAttempt = getLastAttempt();
      assertEquals(2, taskAttempts.size());

      // Give the speculative attempt a distinctive counter, then let it win.
      Counters specAttemptCounters = new Counters();
      Counter cpuCounter = specAttemptCounters.findCounter(TaskCounter.CPU_MILLISECONDS);
      cpuCounter.setValue(1000);
      specAttempt.setCounters(specAttemptCounters);
      commitTaskAttempt(specAttempt.getAttemptId());
      specAttempt.setProgress(1.0f);
      specAttempt.setState(TaskAttemptState.SUCCEEDED);
      mockTask.handle(new TaskTAttemptEvent(specAttempt.getAttemptId(),
          TaskEventType.T_ATTEMPT_SUCCEEDED));
      assertEquals(TaskState.SUCCEEDED, mockTask.getState());
      baseAttempt.setProgress(1.0f);

      // Task-level counters must be the speculative attempt's counters.
      Counters taskCounters = mockTask.getCounters();
      assertEquals("wrong counters for task", specAttemptCounters, taskCounters);
    }

    EqualityVerifier 
    /**
     * After both the original and a speculative attempt succeeded, a failure
     * reported against the speculative attempt must put the task back into the
     * scheduled state and spawn a third attempt.
     */
    @Test
    public void testSpeculativeMapMultipleSucceedFetchFailure() {
      mockTask = createMockTask(TaskType.MAP);
      runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_SUCCEEDED);
      assertEquals(2, taskAttempts.size());

      // Report the second (speculative) attempt as failed.
      TaskAttemptId speculativeAttempt = taskAttempts.get(1).getAttemptId();
      mockTask.handle(
          new TaskTAttemptEvent(speculativeAttempt, TaskEventType.T_ATTEMPT_FAILED));

      // The task reschedules and a replacement attempt is created.
      assertTaskScheduledState();
      assertEquals(3, taskAttempts.size());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * If the first attempt fails during commit, a replacement attempt must be
     * started, and only that second attempt may commit once it succeeds.
     */
    @Test
    public void testFailureDuringTaskAttemptCommit() {
      mockTask = createMockTask(TaskType.MAP);
      TaskId taskId = getNewTaskID();
      scheduleTaskAttempt(taskId);
      launchTaskAttempt(getLastAttempt().getAttemptId());
      updateLastAttemptState(TaskAttemptState.COMMIT_PENDING);
      commitTaskAttempt(getLastAttempt().getAttemptId());

      // First attempt fails mid-commit; the task spawns a replacement.
      updateLastAttemptState(TaskAttemptState.FAILED);
      failRunningTaskAttempt(getLastAttempt().getAttemptId());
      assertEquals(2, taskAttempts.size());

      // Second attempt commits and succeeds.
      updateLastAttemptState(TaskAttemptState.SUCCEEDED);
      commitTaskAttempt(getLastAttempt().getAttemptId());
      mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
          TaskEventType.T_ATTEMPT_SUCCEEDED));

      // Commit permission must belong to the second attempt only.
      assertFalse("First attempt should not commit",
          mockTask.canCommit(taskAttempts.get(0).getAttemptId()));
      assertTrue("Second attempt should commit",
          mockTask.canCommit(getLastAttempt().getAttemptId()));
      assertTaskSucceededState();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Once a single-max-attempt task enters FAILED, it must stay FAILED and
     * absorb every subsequent event (kill, speculation, launch, commit-pending,
     * failure, success, kill of individual attempts) without changing state or
     * adding attempts.
     */
    @Test public void testFailedTransitions(){
      // maxAttempts=1 so the first attempt failure fails the whole task.
      mockTask=new MockTaskImpl(jobId,partition,dispatcher.getEventHandler(),remoteJobConfFile,conf,taskAttemptListener,jobToken,credentials,clock,startCount,metrics,appContext,TaskType.MAP){ @Override protected int getMaxAttempts(){ return 1; } } ;
      TaskId taskId=getNewTaskID(); scheduleTaskAttempt(taskId); launchTaskAttempt(getLastAttempt().getAttemptId());
      // Add and launch three speculative attempts: four attempts in total.
      mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT)); launchTaskAttempt(getLastAttempt().getAttemptId()); mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT)); launchTaskAttempt(getLastAttempt().getAttemptId()); mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT)); launchTaskAttempt(getLastAttempt().getAttemptId()); assertEquals(4,taskAttempts.size());
      // Failing the first attempt fails the task (max attempts exhausted).
      MockTaskAttemptImpl taskAttempt=taskAttempts.get(0); taskAttempt.setState(TaskAttemptState.FAILED); mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_FAILED)); assertEquals(TaskState.FAILED,mockTask.getState());
      // A kill on a FAILED task is ignored.
      mockTask.handle(new TaskEvent(taskId,TaskEventType.T_KILL)); assertEquals(TaskState.FAILED,mockTask.getState());
      // Speculation/launch events after failure neither change state nor add attempts.
      mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ADD_SPEC_ATTEMPT)); mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),TaskEventType.T_ATTEMPT_LAUNCHED)); assertEquals(TaskState.FAILED,mockTask.getState()); assertEquals(4,taskAttempts.size());
      // Commit-pending then failure of the second attempt: still FAILED.
      taskAttempt=taskAttempts.get(1); taskAttempt.setState(TaskAttemptState.COMMIT_PENDING); mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_COMMIT_PENDING)); assertEquals(TaskState.FAILED,mockTask.getState()); taskAttempt.setState(TaskAttemptState.FAILED); mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_FAILED));
      assertEquals(TaskState.FAILED,mockTask.getState());
      // A late success of the third attempt cannot resurrect the task.
      taskAttempt=taskAttempts.get(2); taskAttempt.setState(TaskAttemptState.SUCCEEDED); mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_SUCCEEDED)); assertEquals(TaskState.FAILED,mockTask.getState());
      // A kill of the fourth attempt is likewise absorbed.
      taskAttempt=taskAttempts.get(3); taskAttempt.setState(TaskAttemptState.KILLED); mockTask.handle(new TaskTAttemptEvent(taskAttempt.getAttemptId(),TaskEventType.T_ATTEMPT_KILLED)); assertEquals(TaskState.FAILED,mockTask.getState()); }

    Class: org.apache.hadoop.mapreduce.v2.app.launcher.TestContainerLauncher

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The launcher's thread pool must grow with demand only up to the
     * configured MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT (12 here), even
     * when more launch events are pending.
     */
    @Test(timeout=5000) public void testPoolLimits() throws InterruptedException {
      // Fixed ids; all events reuse the same attempt/container, only hosts differ.
      ApplicationId appId=ApplicationId.newInstance(12345,67); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,3); JobId jobId=MRBuilderUtils.newJobId(appId,8); TaskId taskId=MRBuilderUtils.newTaskId(jobId,9,TaskType.MAP); TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,0); ContainerId containerId=ContainerId.newInstance(appAttemptId,10);
      AppContext context=mock(AppContext.class); CustomContainerLauncher containerLauncher=new CustomContainerLauncher(context); Configuration conf=new Configuration();
      // Cap the launcher pool at 12 threads.
      conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,12); containerLauncher.init(conf); containerLauncher.start(); ThreadPoolExecutor threadPool=containerLauncher.getThreadPool();
      // 10 distinct hosts -> pool grows to 10 (still below the limit).
      containerLauncher.expectedCorePoolSize=ContainerLauncherImpl.INITIAL_POOL_SIZE; for (int i=0; i < 10; i++) { containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher,10); Assert.assertEquals(10,threadPool.getPoolSize()); Assert.assertNull(containerLauncher.foundErrors);
      // 4 more hosts would want 14 threads, but the pool must stop at the limit of 12.
      containerLauncher.expectedCorePoolSize=12; for (int i=1; i <= 4; i++) { containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host1" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher,12); Assert.assertEquals(12,threadPool.getPoolSize()); Assert.assertNull(containerLauncher.foundErrors);
      // Let events drain; the pool size still never exceeds the limit.
      containerLauncher.finishEventHandling=true; waitForEvents(containerLauncher,14); Assert.assertEquals(12,threadPool.getPoolSize()); Assert.assertNull(containerLauncher.foundErrors); containerLauncher.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A NodeManager that never answers the launch RPC must cause the container
     * launch to time out (yarn.rpc.nm-command-timeout=3000ms), fail the single
     * map attempt, and surface a SocketTimeoutException in the attempt's
     * diagnostics.
     */
    @Test(timeout=15000) public void testSlowNM() throws Exception {
      conf=new Configuration(); int maxAttempts=1;
      // One map attempt only, no uber mode, 3s NM command timeout.
      conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS,maxAttempts); conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE,false); conf.setInt("yarn.rpc.nm-command-timeout",3000); conf.set(YarnConfiguration.IPC_RPC_IMPL,HadoopYarnProtoRPC.class.getName()); YarnRPC rpc=YarnRPC.create(conf);
      // Stand up a token-secured dummy ContainerManager server to play the slow NM.
      String bindAddr="localhost:0"; InetSocketAddress addr=NetUtils.createSocketAddr(bindAddr); NMTokenSecretManagerInNM tokenSecretManager=new NMTokenSecretManagerInNM(); MasterKey masterKey=Records.newRecord(MasterKey.class); masterKey.setBytes(ByteBuffer.wrap("key".getBytes())); tokenSecretManager.setMasterKey(masterKey); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"token"); server=rpc.getServer(ContainerManagementProtocol.class,new DummyContainerManager(),addr,conf,tokenSecretManager,1); server.start();
      MRApp app=new MRAppWithSlowNM(tokenSecretManager);
      try {
        Job job=app.submit(conf); app.waitForState(job,JobState.RUNNING);
        // Exactly one task with exactly one attempt is expected.
        Map tasks=job.getTasks(); Assert.assertEquals("Num tasks is not correct",1,tasks.size()); Task task=tasks.values().iterator().next(); app.waitForState(task,TaskState.SCHEDULED); Map attempts=tasks.values().iterator().next().getAttempts(); Assert.assertEquals("Num attempts is not correct",maxAttempts,attempts.size());
        // The attempt gets assigned, then the launch RPC times out and the job fails.
        TaskAttempt attempt=attempts.values().iterator().next(); app.waitForInternalState((TaskAttemptImpl)attempt,TaskAttemptStateInternal.ASSIGNED); app.waitForState(job,JobState.FAILED);
        // Diagnostics must name the container and the socket timeout.
        String diagnostics=attempt.getDiagnostics().toString(); LOG.info("attempt.getDiagnostics: " + diagnostics); Assert.assertTrue(diagnostics.contains("Container launch failed for " + "container_0_0000_01_000000 : ")); Assert.assertTrue(diagnostics.contains("java.net.SocketTimeoutException: 3000 millis timeout while waiting for channel"));
      }
      finally { server.stop(); app.stop(); } }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=5000) public void testPoolSize() throws InterruptedException { ApplicationId appId=ApplicationId.newInstance(12345,67); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,3); JobId jobId=MRBuilderUtils.newJobId(appId,8); TaskId taskId=MRBuilderUtils.newTaskId(jobId,9,TaskType.MAP); AppContext context=mock(AppContext.class); CustomContainerLauncher containerLauncher=new CustomContainerLauncher(context); containerLauncher.init(new Configuration()); containerLauncher.start(); ThreadPoolExecutor threadPool=containerLauncher.getThreadPool(); Assert.assertEquals(0,threadPool.getPoolSize()); Assert.assertEquals(ContainerLauncherImpl.INITIAL_POOL_SIZE,threadPool.getCorePoolSize()); Assert.assertNull(containerLauncher.foundErrors); containerLauncher.expectedCorePoolSize=ContainerLauncherImpl.INITIAL_POOL_SIZE; for (int i=0; i < 10; i++) { ContainerId containerId=ContainerId.newInstance(appAttemptId,i); TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,i); containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher,10); Assert.assertEquals(10,threadPool.getPoolSize()); Assert.assertNull(containerLauncher.foundErrors); containerLauncher.finishEventHandling=true; int timeOut=0; while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) { LOG.info("Waiting for number of events processed to become " + 10 + ". It is now "+ containerLauncher.numEventsProcessed.get()+ ". 
Timeout is "+ timeOut); Thread.sleep(1000); } Assert.assertEquals(10,containerLauncher.numEventsProcessed.get()); containerLauncher.finishEventHandling=false; for (int i=0; i < 10; i++) { ContainerId containerId=ContainerId.newInstance(appAttemptId,i + 10); TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,i + 10); containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host" + i + ":1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); } waitForEvents(containerLauncher,20); Assert.assertEquals(10,threadPool.getPoolSize()); Assert.assertNull(containerLauncher.foundErrors); containerLauncher.expectedCorePoolSize=11 + ContainerLauncherImpl.INITIAL_POOL_SIZE; containerLauncher.finishEventHandling=false; ContainerId containerId=ContainerId.newInstance(appAttemptId,21); TaskAttemptId taskAttemptId=MRBuilderUtils.newTaskAttemptId(taskId,21); containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,containerId,"host11:1234",null,ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH)); waitForEvents(containerLauncher,21); Assert.assertEquals(11,threadPool.getPoolSize()); Assert.assertNull(containerLauncher.foundErrors); containerLauncher.stop(); }

    Class: org.apache.hadoop.mapreduce.v2.app.rm.TestRMContainerAllocator

    InternalCallVerifier EqualityVerifier 
    /**
     * With a map request pending that cannot be satisfied while a reducer holds
     * a container, preemptReducesIfNeeded() must immediately mark that reducer
     * for preemption.
     */
    @Test(timeout = 30000)
    public void testPreemptReducers() throws Exception {
      LOG.info("Running testPreemptReducers");

      Configuration conf = new Configuration();
      MyResourceManager resourceMgr = new MyResourceManager(conf);
      resourceMgr.start();
      DrainDispatcher drain = (DrainDispatcher) resourceMgr.getRMContext().getDispatcher();

      // Submit the app, bring up the AM node, and launch the AM.
      RMApp application = resourceMgr.submitApp(1024);
      drain.await();
      MockNM amNode = resourceMgr.registerNode("amNM:1234", 2048);
      amNode.nodeHeartbeat(true);
      drain.await();
      ApplicationAttemptId attemptId = application.getCurrentAppAttempt().getAppAttemptId();
      resourceMgr.sendAMLaunched(attemptId);
      drain.await();

      JobId jobId = MRBuilderUtils.newJobId(attemptId.getApplicationId(), 0);
      Job job = mock(Job.class);
      when(job.getReport()).thenReturn(
          MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
              0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
      MyContainerAllocator allocator =
          new MyContainerAllocator(resourceMgr, conf, attemptId, job, new SystemClock());
      allocator.setMapResourceRequest(1024);
      allocator.setReduceResourceRequest(1024);
      RMContainerAllocator.AssignedRequests assignedRequests = allocator.getAssignedRequests();
      RMContainerAllocator.ScheduledRequests scheduledRequests = allocator.getScheduledRequests();

      // Pending 2GB map request plus an assigned reducer -> preemption needed.
      ContainerRequestEvent mapRequest =
          createReq(jobId, 1, 2048, new String[] {"h1"}, false, false);
      scheduledRequests.maps.put(mock(TaskAttemptId.class),
          new RMContainerRequestor.ContainerRequest(mapRequest, null));
      assignedRequests.reduces.put(mock(TaskAttemptId.class), mock(Container.class));

      allocator.preemptReducesIfNeeded();
      Assert.assertEquals("The reducer is not preempted",
          1, assignedRequests.preemptionWaitingReduces.size());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Drives a 10-map/10-reduce job and checks that job progress and the
     * RM-reported app progress advance in lockstep through a fixed series of
     * checkpoints (0.05 -> 0.095 -> 0.41 -> 0.59 -> 0.95) as tasks finish.
     * The expected values encode the AM's weighting of map vs reduce phases —
     * presumably 5% for startup, ~45% spread over maps, remainder over
     * reduces; TODO confirm against RMContainerAllocator's progress math.
     */
    @Test public void testReportedAppProgress() throws Exception {
      LOG.info("Running testReportedAppProgress"); Configuration conf=new Configuration(); final MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp rmApp=rm.submitApp(1024); rmDispatcher.await();
      // AM node has 21504MB so all 10 maps (and later reduces) can run at once.
      MockNM amNodeManager=rm.registerNode("amNM:1234",21504); amNodeManager.nodeHeartbeat(true); rmDispatcher.await(); final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); rmDispatcher.await();
      // Real MRApp wired to the mock RM via MyContainerAllocator; drainable dispatcher.
      MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,10,false,this.getClass().getName(),true,1){ @Override protected Dispatcher createDispatcher(){ return new DrainDispatcher(); } protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){ return new MyContainerAllocator(rm,appAttemptId,context); } } ;
      // Before submission the app reports zero progress.
      Assert.assertEquals(0.0,rmApp.getProgress(),0.0); mrApp.submit(conf); Job job=mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue(); DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher(); MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator(); mrApp.waitForInternalState((JobImpl)job,JobStateInternal.RUNNING); amDispatcher.await();
      // Wait for all maps to be UNASSIGNED, then schedule and heartbeat until they run.
      for ( Task t : job.getTasks().values()) { if (t.getType() == TaskType.MAP) { mrApp.waitForInternalState((TaskAttemptImpl)t.getAttempts().values().iterator().next(),TaskAttemptStateInternal.UNASSIGNED); } } amDispatcher.await(); allocator.schedule(); rmDispatcher.await(); amNodeManager.nodeHeartbeat(true); rmDispatcher.await(); allocator.schedule(); rmDispatcher.await(); for ( Task t : job.getTasks().values()) { if (t.getType() == TaskType.MAP) { mrApp.waitForState(t,TaskState.RUNNING); } } allocator.schedule(); rmDispatcher.await();
      // All maps running, none finished: 5% checkpoint.
      Assert.assertEquals(0.05f,job.getProgress(),0.001f); Assert.assertEquals(0.05f,rmApp.getProgress(),0.001f);
      // Finish 1 map -> 9.5%.
      Iterator it=job.getTasks().values().iterator(); finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,1); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.095f,job.getProgress(),0.001f); Assert.assertEquals(0.095f,rmApp.getProgress(),0.001f);
      // Finish 7 more maps -> 41%.
      finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,7); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.41f,job.getProgress(),0.001f); Assert.assertEquals(0.41f,rmApp.getProgress(),0.001f);
      // Finish the last 2 maps, then bring the reduces up to RUNNING.
      finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,2); allocator.schedule(); rmDispatcher.await(); amNodeManager.nodeHeartbeat(true); rmDispatcher.await(); allocator.schedule(); rmDispatcher.await(); for ( Task t : job.getTasks().values()) { if (t.getType() == TaskType.REDUCE) { mrApp.waitForState(t,TaskState.RUNNING); } }
      // Finish 2 reduces -> 59%; finish remaining 8 -> 95%.
      finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,2); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.59f,job.getProgress(),0.001f); Assert.assertEquals(0.59f,rmApp.getProgress(),0.001f); finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,8); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.95f,job.getProgress(),0.001f); Assert.assertEquals(0.95f,rmApp.getProgress(),0.001f); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Two container requests of different sizes (1GB targeting h1, 2GB
     * targeting h2) are assigned only after the worker nodes heartbeat in
     * capacity; both must then be satisfied.
     */
    @Test
    public void testResource() throws Exception {
      LOG.info("Running testResource");
      Configuration conf = new Configuration();
      MyResourceManager resourceMgr = new MyResourceManager(conf);
      resourceMgr.start();
      DrainDispatcher drain = (DrainDispatcher) resourceMgr.getRMContext().getDispatcher();

      RMApp application = resourceMgr.submitApp(1024);
      drain.await();

      // Bring up the AM node and launch the AM.
      MockNM amNode = resourceMgr.registerNode("amNM:1234", 2048);
      amNode.nodeHeartbeat(true);
      drain.await();
      ApplicationAttemptId attemptId = application.getCurrentAppAttempt().getAppAttemptId();
      resourceMgr.sendAMLaunched(attemptId);
      drain.await();

      JobId jobId = MRBuilderUtils.newJobId(attemptId.getApplicationId(), 0);
      Job job = mock(Job.class);
      when(job.getReport()).thenReturn(
          MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
              0, 0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
      MyContainerAllocator allocator =
          new MyContainerAllocator(resourceMgr, conf, attemptId, job);

      // Three roomy worker nodes.
      MockNM workerNode1 = resourceMgr.registerNode("h1:1234", 10240);
      MockNM workerNode2 = resourceMgr.registerNode("h2:1234", 10240);
      MockNM workerNode3 = resourceMgr.registerNode("h3:1234", 10240);
      drain.await();

      ContainerRequestEvent smallRequest = createReq(jobId, 1, 1024, new String[] {"h1"});
      allocator.sendRequest(smallRequest);
      ContainerRequestEvent largeRequest = createReq(jobId, 2, 2048, new String[] {"h2"});
      allocator.sendRequest(largeRequest);

      // First schedule() only delivers the asks to the RM; nothing assigned yet.
      List assigned = allocator.schedule();
      drain.await();
      Assert.assertEquals("No of assignments must be 0", 0, assigned.size());

      // Node heartbeats let the scheduler hand out containers.
      workerNode1.nodeHeartbeat(true);
      workerNode2.nodeHeartbeat(true);
      workerNode3.nodeHeartbeat(true);
      drain.await();

      assigned = allocator.schedule();
      drain.await();
      checkAssignments(new ContainerRequestEvent[] {smallRequest, largeRequest},
          assigned, false);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * The allocator's heartbeat thread must track a controlled clock (its last
     * heartbeat time follows clock updates) and must run callbacks registered
     * via runOnNextHeartbeat() on the following heartbeat.
     */
    @Test public void testHeartbeatHandler() throws Exception {
      LOG.info("Running testHeartbeatHandler"); Configuration conf=new Configuration();
      // 1ms heartbeat interval so the loop below converges quickly.
      conf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS,1); ControlledClock clock=new ControlledClock(new SystemClock()); AppContext appContext=mock(AppContext.class); when(appContext.getClock()).thenReturn(clock); when(appContext.getApplicationID()).thenReturn(ApplicationId.newInstance(1,1));
      // Allocator with RM registration, proxy creation and the heartbeat body all stubbed out.
      RMContainerAllocator allocator=new RMContainerAllocator(mock(ClientService.class),appContext,new NoopAMPreemptionPolicy()){ @Override protected void register(){ } @Override protected ApplicationMasterProtocol createSchedulerProxy(){ return mock(ApplicationMasterProtocol.class); } @Override protected synchronized void heartbeat() throws Exception { } } ;
      allocator.init(conf); allocator.start();
      // Advance the clock to 5 and poll (<=5s) until the heartbeat thread observes it.
      clock.setTime(5); int timeToWaitMs=5000; while (allocator.getLastHeartbeatTime() != 5 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs-=10; } Assert.assertEquals(5,allocator.getLastHeartbeatTime());
      // Same again for time 7.
      clock.setTime(7); timeToWaitMs=5000; while (allocator.getLastHeartbeatTime() != 7 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs-=10; } Assert.assertEquals(7,allocator.getLastHeartbeatTime());
      // A callback registered before the next heartbeat must fire with it.
      final AtomicBoolean callbackCalled=new AtomicBoolean(false); allocator.runOnNextHeartbeat(new Runnable(){ @Override public void run(){ callbackCalled.set(true); } } ); clock.setTime(8); timeToWaitMs=5000; while (allocator.getLastHeartbeatTime() != 8 && timeToWaitMs > 0) { Thread.sleep(10); timeToWaitMs-=10; } Assert.assertEquals(8,allocator.getLastHeartbeatTime()); Assert.assertTrue(callbackCalled.get()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With node blacklisting enabled and a failure threshold of 1, nodes h1 and
     * h2 become blacklisted after one task failure each; subsequent requests
     * must only be satisfied by the remaining node h3.
     */
    @Test public void testBlackListedNodes() throws Exception {
      LOG.info("Running testBlackListedNodes"); Configuration conf=new Configuration();
      // Blacklist after a single failure; disable the ignore-blacklisting percentage.
      conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true); conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1); conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,-1);
      MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await();
      JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
      MockNM nodeManager1=rm.registerNode("h1:1234",10240); MockNM nodeManager2=rm.registerNode("h2:1234",10240); MockNM nodeManager3=rm.registerNode("h3:1234",10240); dispatcher.await();
      // Three requests, one per host; first schedule() only sends the asks.
      ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"}); allocator.sendRequest(event1); ContainerRequestEvent event2=createReq(jobId,2,1024,new String[]{"h2"}); allocator.sendRequest(event2); ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h3"}); allocator.sendRequest(event3); List assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size());
      // One failure each on h1 and h2 blacklists both nodes.
      ContainerFailedEvent f1=createFailEvent(jobId,1,"h1",false); allocator.sendFailure(f1); ContainerFailedEvent f2=createFailEvent(jobId,1,"h2",false); allocator.sendFailure(f2); nodeManager1.nodeHeartbeat(true); nodeManager2.nodeHeartbeat(true);
      dispatcher.await(); assigned=allocator.schedule(); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size());
      // Exactly two blacklist additions (h1, h2) must have reached the RM.
      assertBlacklistAdditionsAndRemovals(2,0,rm);
      // Heartbeats from blacklisted nodes produce no assignments.
      nodeManager1.nodeHeartbeat(false); nodeManager2.nodeHeartbeat(false); dispatcher.await(); assigned=allocator.schedule(); dispatcher.await(); assertBlacklistAdditionsAndRemovals(0,0,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size());
      // Only h3 may serve the three pending requests.
      nodeManager3.nodeHeartbeat(true); dispatcher.await(); assigned=allocator.schedule(); dispatcher.await(); assertBlacklistAdditionsAndRemovals(0,0,rm); Assert.assertTrue("No of assignments must be 3",assigned.size() == 3); for ( TaskAttemptContainerAssignedEvent assig : assigned) { Assert.assertTrue("Assigned container host not correct","h3".equals(assig.getContainer().getNodeId().getHost())); } }

    APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Map requests should be assigned with node locality where capacity
     * allows: two 1GB requests for h1 fit on h1 (3072MB), while the request
     * for h2 lands on h3 (1536MB) because h2 never heartbeats.
     */
    @Test public void testMapNodeLocality() throws Exception {
      LOG.info("Running testMapNodeLocality"); Configuration conf=new Configuration(); MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await();
      JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
      // h1 can hold 2x1GB; h2 is registered but never heartbeats; h3 holds 1x1GB.
      MockNM nodeManager1=rm.registerNode("h1:1234",3072); rm.registerNode("h2:1234",10240); MockNM nodeManager3=rm.registerNode("h3:1234",1536); dispatcher.await();
      // Two requests for h1, one for h2.
      ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"}); allocator.sendRequest(event1); ContainerRequestEvent event2=createReq(jobId,2,1024,new String[]{"h1"}); allocator.sendRequest(event2); ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h2"}); allocator.sendRequest(event3);
      // First schedule() only delivers the asks.
      List assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size());
      // Only h3 and h1 heartbeat, so h2's request must spill onto h3.
      nodeManager3.nodeHeartbeat(true); nodeManager1.nodeHeartbeat(true); dispatcher.await(); assigned=allocator.schedule(); dispatcher.await(); checkAssignments(new ContainerRequestEvent[]{event1,event2,event3},assigned,false);
      // NOTE(review): removing from 'assigned' inside this for-each is only safe
      // because of the immediate break — verify 'assigned' supports removal here.
      for ( TaskAttemptContainerAssignedEvent event : assigned) { if (event.getTaskAttemptID().equals(event3.getAttemptID())) { assigned.remove(event); Assert.assertTrue(event.getContainer().getNodeId().getHost().equals("h3")); break; } }
      // The two remaining assignments must be node-local (checkLocality=true).
      checkAssignments(new ContainerRequestEvent[]{event1,event2},assigned,true); }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises the ignore-blacklisting threshold: with the threshold at 33%,
     * blacklisting is disabled whenever more than a third of the known nodes
     * are blacklisted, and re-enabled as enough new nodes register. The
     * getContainerOnHost(...) numeric arguments after the allocator are the
     * expected blacklist additions/removals and ignore-blacklist
     * transitions reported to the RM for that step — presumably in that
     * order; TODO confirm against the getContainerOnHost helper's signature.
     */
    @Test public void testIgnoreBlacklisting() throws Exception {
      LOG.info("Running testIgnoreBlacklisting"); Configuration conf=new Configuration();
      // Blacklist after 1 failure; ignore blacklisting above 33% blacklisted nodes.
      conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true); conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1); conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,33);
      MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await();
      // Nodes h1..h10 are registered incrementally as the ratio test requires.
      MockNM[] nodeManagers=new MockNM[10]; int nmNum=0; List assigned=null; nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher); nodeManagers[0].nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await();
      JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob);
      // Baseline: a container on h1 while nothing is blacklisted.
      assigned=getContainerOnHost(jobId,1,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size());
      // Fail a container on h1: h1 is blacklisted, but with 1/1 nodes blacklisted
      // the 33% threshold immediately activates ignore-blacklisting.
      LOG.info("Failing container _1 on H1 (Node should be blacklisted and" + " ignore blacklisting enabled"); ContainerFailedEvent f1=createFailEvent(jobId,1,"h1",false); allocator.sendFailure(f1); assigned=getContainerOnHost(jobId,2,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,1,0,0,1,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); assigned=getContainerOnHost(jobId,2,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size());
      // With ignore-blacklisting on, h2, h3 and even blacklisted h1 all assign.
      nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher); assigned=getContainerOnHost(jobId,3,1024,new String[]{"h2"},nodeManagers[1],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size()); nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher); assigned=getContainerOnHost(jobId,4,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size()); assigned=getContainerOnHost(jobId,5,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size());
      // Registering h4 drops the ratio to 1/4 <= 33%: blacklisting re-activates
      // and h1 stops receiving assignments.
      nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher); assigned=getContainerOnHost(jobId,6,1024,new String[]{"h4"},nodeManagers[3],dispatcher,allocator,0,0,1,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size()); assigned=getContainerOnHost(jobId,7,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size());
      // Failing h2 blacklists a second node (2/4 > 33%): ignore-blacklisting returns,
      // so the queued h1 requests get satisfied (2 assignments) and h2 assigns again.
      ContainerFailedEvent f2=createFailEvent(jobId,3,"h2",false); allocator.sendFailure(f2); assigned=getContainerOnHost(jobId,8,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,1,0,0,2,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); assigned=getContainerOnHost(jobId,8,1024,new String[]{"h1"},nodeManagers[0],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 2",2,assigned.size()); assigned=getContainerOnHost(jobId,9,1024,new String[]{"h2"},nodeManagers[1],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size());
      // Fail h3 too (3 blacklisted); with h5 registered the ratio stays above the
      // threshold, so h3 still assigns under ignore-blacklisting.
      ContainerFailedEvent f3=createFailEvent(jobId,4,"h3",false); allocator.sendFailure(f3); nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher); assigned=getContainerOnHost(jobId,10,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm);
      Assert.assertEquals("No of assignments must be 1",1,assigned.size());
      // Register 5 more nodes (10 total): at 3/10 <= 33% blacklisting re-activates
      // on the last registration, and h3 must stop assigning.
      for (int i=0; i < 5; i++) { nodeManagers[nmNum]=registerNodeManager(nmNum++,rm,dispatcher); assigned=getContainerOnHost(jobId,11 + i,1024,new String[]{String.valueOf(5 + i)},nodeManagers[4 + i],dispatcher,allocator,0,0,(i == 4 ? 3 : 0),0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size()); } assigned=getContainerOnHost(jobId,20,1024,new String[]{"h3"},nodeManagers[2],dispatcher,allocator,0,0,0,0,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testNonAggressivelyPreemptReducers() throws Exception { LOG.info("Running testPreemptReducers"); final int preemptThreshold=2; Configuration conf=new Configuration(); conf.setInt(MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,preemptThreshold); MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await(); JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); ControlledClock clock=new ControlledClock(null); clock.setTime(1); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob,clock); allocator.setMapResourceRequest(1024); allocator.setReduceResourceRequest(1024); RMContainerAllocator.AssignedRequests assignedRequests=allocator.getAssignedRequests(); RMContainerAllocator.ScheduledRequests scheduledRequests=allocator.getScheduledRequests(); ContainerRequestEvent event1=createReq(jobId,1,2048,new String[]{"h1"},false,false); scheduledRequests.maps.put(mock(TaskAttemptId.class),new RMContainerRequestor.ContainerRequest(event1,null,clock.getTime())); assignedRequests.reduces.put(mock(TaskAttemptId.class),mock(Container.class)); clock.setTime(clock.getTime() + 1); allocator.preemptReducesIfNeeded(); Assert.assertEquals("The reducer is aggressively preeempted",0,assignedRequests.preemptionWaitingReduces.size()); clock.setTime(clock.getTime() + (preemptThreshold) * 1000); allocator.preemptReducesIfNeeded(); Assert.assertEquals("The reducer is not 
preeempted",1,assignedRequests.preemptionWaitingReduces.size()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Node-state changes reported in the allocate response must be forwarded
     * as job-updated-node events, and a task attempt running on a node that
     * turns unhealthy must receive a kill event. Repeated scheduling with no
     * new node changes must produce no further events.
     */
    @Test public void testUpdatedNodes() throws Exception {
      Configuration conf=new Configuration(); MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await();
      JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob); MockNM nm1=rm.registerNode("h1:1234",10240); MockNM nm2=rm.registerNode("h2:1234",10240); dispatcher.await();
      // One map request on h1; wire the mock job so the attempt resolves to nm1.
      ContainerRequestEvent event=createReq(jobId,1,1024,new String[]{"h1"}); allocator.sendRequest(event); TaskAttemptId attemptId=event.getAttemptID(); TaskAttempt mockTaskAttempt=mock(TaskAttempt.class); when(mockTaskAttempt.getNodeId()).thenReturn(nm1.getNodeId()); Task mockTask=mock(Task.class); when(mockTask.getAttempt(attemptId)).thenReturn(mockTaskAttempt); when(mockJob.getTask(attemptId.getTaskId())).thenReturn(mockTask);
      // First healthy heartbeat: one updated-nodes event covering all 3 nodes.
      List assigned=allocator.schedule(); dispatcher.await(); nm1.nodeHeartbeat(true); dispatcher.await(); Assert.assertEquals(1,allocator.getJobUpdatedNodeEvents().size()); Assert.assertEquals(3,allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size()); allocator.getJobUpdatedNodeEvents().clear();
      // The request is assigned on nm1; no stray node/kill events.
      assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals(1,assigned.size()); Assert.assertEquals(nm1.getNodeId(),assigned.get(0).getContainer().getNodeId()); Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty()); Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty());
      // Both nodes turn unhealthy: 2 updated nodes, and the attempt on nm1 is killed.
      nm1.nodeHeartbeat(false); nm2.nodeHeartbeat(false); dispatcher.await(); assigned=allocator.schedule(); dispatcher.await();
      Assert.assertEquals(0,assigned.size()); Assert.assertEquals(1,allocator.getJobUpdatedNodeEvents().size()); Assert.assertEquals(1,allocator.getTaskAttemptKillEvents().size()); Assert.assertEquals(2,allocator.getJobUpdatedNodeEvents().get(0).getUpdatedNodes().size()); Assert.assertEquals(attemptId,allocator.getTaskAttemptKillEvents().get(0).getTaskAttemptID()); allocator.getJobUpdatedNodeEvents().clear(); allocator.getTaskAttemptKillEvents().clear();
      // No new node changes -> no new events on the next schedule.
      assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals(0,assigned.size()); Assert.assertTrue(allocator.getJobUpdatedNodeEvents().isEmpty()); Assert.assertTrue(allocator.getTaskAttemptKillEvents().isEmpty()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies node blacklisting end-to-end: with blacklisting enabled and
     * MAX_TASK_FAILURES_PER_TRACKER=1, a single container failure on h1 causes h1
     * to be blacklisted (assertBlacklistAdditionsAndRemovals(1,0,rm)), after which
     * heartbeats from h1 schedule nothing for this job and the outstanding
     * requests are eventually satisfied on h3 instead (both final assignments must
     * report host "h3"). The IGNORE_BLACKLISTING percentage is set to -1 so
     * blacklisting is never ignored.
     * NOTE(review): the "assgined to" log string contains a typo; left untouched
     * here since runtime strings may not change in a documentation-only edit.
     */
    @Test public void testBlackListedNodesWithSchedulingToThatNode() throws Exception { LOG.info("Running testBlackListedNodesWithSchedulingToThatNode"); Configuration conf=new Configuration(); conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true); conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1); conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,-1); MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await(); JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob); MockNM nodeManager1=rm.registerNode("h1:1234",10240); MockNM nodeManager3=rm.registerNode("h3:1234",10240); dispatcher.await(); LOG.info("Requesting 1 Containers _1 on H1"); ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"}); allocator.sendRequest(event1); LOG.info("RM Heartbeat (to send the container requests)"); List assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); LOG.info("h1 Heartbeat (To actually schedule the containers)"); nodeManager1.nodeHeartbeat(true); dispatcher.await(); LOG.info("RM Heartbeat (To process the scheduled containers)"); assigned=allocator.schedule(); dispatcher.await(); assertBlacklistAdditionsAndRemovals(0,0,rm); Assert.assertEquals("No of assignments must be 1",1,assigned.size()); LOG.info("Failing 
container _1 on H1 (should blacklist the node)"); ContainerFailedEvent f1=createFailEvent(jobId,1,"h1",false); allocator.sendFailure(f1); ContainerRequestEvent event1f=createReq(jobId,1,1024,new String[]{"h1"},true,false); allocator.sendRequest(event1f); assigned=allocator.schedule(); dispatcher.await(); assertBlacklistAdditionsAndRemovals(1,0,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h1","h3"}); allocator.sendRequest(event3); LOG.info("h1 Heartbeat (To actually schedule the containers)"); nodeManager1.nodeHeartbeat(true); dispatcher.await(); LOG.info("RM Heartbeat (To process the scheduled containers)"); assigned=allocator.schedule(); dispatcher.await(); assertBlacklistAdditionsAndRemovals(0,0,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); LOG.info("RM Heartbeat (To process the re-scheduled containers)"); assigned=allocator.schedule(); dispatcher.await(); assertBlacklistAdditionsAndRemovals(0,0,rm); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); LOG.info("h3 Heartbeat (To re-schedule the containers)"); nodeManager3.nodeHeartbeat(true); dispatcher.await(); LOG.info("RM Heartbeat (To process the re-scheduled containers for H3)"); assigned=allocator.schedule(); assertBlacklistAdditionsAndRemovals(0,0,rm); dispatcher.await(); for ( TaskAttemptContainerAssignedEvent assig : assigned) { LOG.info(assig.getTaskAttemptID() + " assgined to " + assig.getContainer().getId()+ " with priority "+ assig.getContainer().getPriority()); } Assert.assertEquals("No of assignments must be 2",2,assigned.size()); for ( TaskAttemptContainerAssignedEvent assig : assigned) { Assert.assertEquals("Assigned container " + assig.getContainer().getId() + " host not correct","h3",assig.getContainer().getNodeId().getHost()); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Basic allocate cycle: three container requests on three hosts are sent to
     * the RM via schedule(); the sizes of MyFifoScheduler.lastAsk are asserted at
     * each heartbeat (4 after the first two requests, 3 after the third, 0 once
     * all nodes heartbeat and containers are handed out, 5 on the follow-up
     * schedule). checkAssignments(...) finally matches the assigned containers
     * against the three original request events without enforcing host locality
     * (checkHostMatch=false).
     * NOTE(review): the exact lastAsk counts encode MyFifoScheduler bookkeeping
     * declared elsewhere in this file — confirm there before changing them.
     */
    @Test public void testSimple() throws Exception { LOG.info("Running testSimple"); Configuration conf=new Configuration(); MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await(); JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob); MockNM nodeManager1=rm.registerNode("h1:1234",10240); MockNM nodeManager2=rm.registerNode("h2:1234",10240); MockNM nodeManager3=rm.registerNode("h3:1234",10240); dispatcher.await(); ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"}); allocator.sendRequest(event1); ContainerRequestEvent event2=createReq(jobId,2,1024,new String[]{"h2"}); allocator.sendRequest(event2); List assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); Assert.assertEquals(4,rm.getMyFifoScheduler().lastAsk.size()); ContainerRequestEvent event3=createReq(jobId,3,1024,new String[]{"h3"}); allocator.sendRequest(event3); assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); Assert.assertEquals(3,rm.getMyFifoScheduler().lastAsk.size()); nodeManager1.nodeHeartbeat(true); nodeManager2.nodeHeartbeat(true); nodeManager3.nodeHeartbeat(true); dispatcher.await(); assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals(0,rm.getMyFifoScheduler().lastAsk.size()); 
checkAssignments(new ContainerRequestEvent[]{event1,event2,event3},assigned,false); assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals(5,rm.getMyFifoScheduler().lastAsk.size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * createContainerFinishedEvent must translate a container's exit status into
     * the task-attempt event type: a normal exit (0) yields
     * TA_CONTAINER_COMPLETED, while both ABORTED and PREEMPTED exits yield
     * TA_KILL. Exercised with two distinct container ids.
     */
    @Test public void testCompletedContainerEvent(){
      RMContainerAllocator allocator = new RMContainerAllocator(
          mock(ClientService.class), mock(AppContext.class),
          new NoopAMPreemptionPolicy());
      TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(
          MRBuilderUtils.newTaskId(
              MRBuilderUtils.newJobId(1, 1, 1), 1, TaskType.MAP), 1);
      ApplicationId appId = ApplicationId.newInstance(1, 1);
      ApplicationAttemptId appAttemptId =
          ApplicationAttemptId.newInstance(appId, 1);

      // First container: normal exit vs. RM-aborted exit.
      ContainerId cid1 = ContainerId.newInstance(appAttemptId, 1);
      ContainerStatus okStatus =
          ContainerStatus.newInstance(cid1, ContainerState.RUNNING, "", 0);
      ContainerStatus abortedStatus = ContainerStatus.newInstance(
          cid1, ContainerState.RUNNING, "", ContainerExitStatus.ABORTED);
      TaskAttemptEvent okEvent =
          allocator.createContainerFinishedEvent(okStatus, attemptId);
      Assert.assertEquals(
          TaskAttemptEventType.TA_CONTAINER_COMPLETED, okEvent.getType());
      TaskAttemptEvent abortedEvent =
          allocator.createContainerFinishedEvent(abortedStatus, attemptId);
      Assert.assertEquals(TaskAttemptEventType.TA_KILL, abortedEvent.getType());

      // Second container: normal exit vs. preempted exit.
      ContainerId cid2 = ContainerId.newInstance(appAttemptId, 2);
      ContainerStatus okStatus2 =
          ContainerStatus.newInstance(cid2, ContainerState.RUNNING, "", 0);
      ContainerStatus preemptedStatus = ContainerStatus.newInstance(
          cid2, ContainerState.RUNNING, "", ContainerExitStatus.PREEMPTED);
      TaskAttemptEvent okEvent2 =
          allocator.createContainerFinishedEvent(okStatus2, attemptId);
      Assert.assertEquals(
          TaskAttemptEventType.TA_CONTAINER_COMPLETED, okEvent2.getType());
      TaskAttemptEvent preemptedEvent =
          allocator.createContainerFinishedEvent(preemptedStatus, attemptId);
      Assert.assertEquals(TaskAttemptEventType.TA_KILL, preemptedEvent.getType());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that job progress for a map-only job (10 maps, 0 reduces) is
     * propagated to the RMApp: progress starts at 0.0, moves to 0.05 once all
     * attempts are running, then to 0.14, 0.59 and 0.95 as 1, 5 and 4 tasks are
     * finished via finishNextNTasks(...). Each checkpoint asserts both
     * job.getProgress() and rmApp.getProgress() within 0.001.
     * NOTE(review): the MRApp subclass overrides createDispatcher/
     * createContainerAllocator so both AM and RM dispatchers can be drained
     * deterministically between checkpoints.
     */
    @Test public void testReportedAppProgressWithOnlyMaps() throws Exception { LOG.info("Running testReportedAppProgressWithOnlyMaps"); Configuration conf=new Configuration(); final MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher rmDispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp rmApp=rm.submitApp(1024); rmDispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",11264); amNodeManager.nodeHeartbeat(true); rmDispatcher.await(); final ApplicationAttemptId appAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); rmDispatcher.await(); MRApp mrApp=new MRApp(appAttemptId,ContainerId.newInstance(appAttemptId,0),10,0,false,this.getClass().getName(),true,1){ @Override protected Dispatcher createDispatcher(){ return new DrainDispatcher(); } protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context){ return new MyContainerAllocator(rm,appAttemptId,context); } } ; Assert.assertEquals(0.0,rmApp.getProgress(),0.0); mrApp.submit(conf); Job job=mrApp.getContext().getAllJobs().entrySet().iterator().next().getValue(); DrainDispatcher amDispatcher=(DrainDispatcher)mrApp.getDispatcher(); MyContainerAllocator allocator=(MyContainerAllocator)mrApp.getContainerAllocator(); mrApp.waitForInternalState((JobImpl)job,JobStateInternal.RUNNING); amDispatcher.await(); for ( Task t : job.getTasks().values()) { mrApp.waitForInternalState((TaskAttemptImpl)t.getAttempts().values().iterator().next(),TaskAttemptStateInternal.UNASSIGNED); } amDispatcher.await(); allocator.schedule(); rmDispatcher.await(); amNodeManager.nodeHeartbeat(true); rmDispatcher.await(); allocator.schedule(); rmDispatcher.await(); for ( Task t : job.getTasks().values()) { mrApp.waitForState(t,TaskState.RUNNING); } allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.05f,job.getProgress(),0.001f); Assert.assertEquals(0.05f,rmApp.getProgress(),0.001f); Iterator 
it=job.getTasks().values().iterator(); finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,1); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.14f,job.getProgress(),0.001f); Assert.assertEquals(0.14f,rmApp.getProgress(),0.001f); finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,5); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.59f,job.getProgress(),0.001f); Assert.assertEquals(0.59f,rmApp.getProgress(),0.001f); finishNextNTasks(rmDispatcher,amNodeManager,mrApp,it,4); allocator.schedule(); rmDispatcher.await(); Assert.assertEquals(0.95f,job.getProgress(),0.001f); Assert.assertEquals(0.95f,rmApp.getProgress(),0.001f); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Work-preserving RM restart: requests, a failure (blacklisting h2) and a
     * deallocate are sent against rm1 and their ask/release/blacklist deltas
     * asserted; then rm2 is started from the same MemoryRMStateStore, the NM is
     * told to RESYNC, and the allocator (re-pointed at rm2 via
     * updateSchedulerProxy) must observe the RESYNC allocate response
     * (isResyncCommand) and re-send its outstanding asks, releases and blacklist
     * additions to rm2, after which all three pending requests are satisfied on
     * h1.
     * NOTE(review): the message "Assigned count not correct" on the final
     * assertTrue actually guards the assigned HOST, not a count — consider
     * rewording in a behavior-affecting pass.
     */
    @Test public void testRMContainerAllocatorResendsRequestsOnRMRestart() throws Exception { Configuration conf=new Configuration(); conf.set(YarnConfiguration.RECOVERY_ENABLED,"true"); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED,true); conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE,true); conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER,1); conf.setInt(MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,-1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MyResourceManager rm1=new MyResourceManager(conf,memStore); rm1.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher(); RMApp app=rm1.submitApp(1024); dispatcher.await(); MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm1.sendAMLaunched(appAttemptId); dispatcher.await(); JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm1,conf,appAttemptId,mockJob); ContainerRequestEvent event1=createReq(jobId,1,1024,new String[]{"h1"}); allocator.sendRequest(event1); ContainerRequestEvent event2=createReq(jobId,2,2048,new String[]{"h1","h2"}); allocator.sendRequest(event2); ContainerFailedEvent f1=createFailEvent(jobId,1,"h2",false); allocator.sendFailure(f1); List assignedContainers=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size()); 
assertAsksAndReleases(3,0,rm1); assertBlacklistAdditionsAndRemovals(1,0,rm1); nm1.nodeHeartbeat(true); dispatcher.await(); assignedContainers=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 2",2,assignedContainers.size()); assertAsksAndReleases(0,0,rm1); assertBlacklistAdditionsAndRemovals(0,0,rm1); assignedContainers=allocator.schedule(); Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size()); assertAsksAndReleases(3,0,rm1); assertBlacklistAdditionsAndRemovals(0,0,rm1); ContainerRequestEvent event3=createReq(jobId,3,1000,new String[]{"h1"}); allocator.sendRequest(event3); ContainerAllocatorEvent deallocate1=createDeallocateEvent(jobId,1,false); allocator.sendDeallocate(deallocate1); assignedContainers=allocator.schedule(); Assert.assertEquals("No of assignments must be 0",0,assignedContainers.size()); assertAsksAndReleases(3,1,rm1); assertBlacklistAdditionsAndRemovals(0,0,rm1); MyResourceManager rm2=new MyResourceManager(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); allocator.updateSchedulerProxy(rm2); dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher(); NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); dispatcher.await(); ContainerAllocatorEvent deallocate2=createDeallocateEvent(jobId,2,false); allocator.sendDeallocate(deallocate2); ContainerFailedEvent f2=createFailEvent(jobId,1,"h3",false); allocator.sendFailure(f2); ContainerRequestEvent event4=createReq(jobId,4,2000,new String[]{"h1","h2"}); allocator.sendRequest(event4); allocator.schedule(); dispatcher.await(); Assert.assertTrue("Last allocate response is not RESYNC",allocator.isResyncCommand()); ContainerRequestEvent event5=createReq(jobId,5,3000,new String[]{"h1","h2","h3"}); 
allocator.sendRequest(event5); assignedContainers=allocator.schedule(); dispatcher.await(); assertAsksAndReleases(3,2,rm2); assertBlacklistAdditionsAndRemovals(2,0,rm2); nm1.nodeHeartbeat(true); dispatcher.await(); assignedContainers=allocator.schedule(); dispatcher.await(); Assert.assertEquals("Number of container should be 3",3,assignedContainers.size()); for ( TaskAttemptContainerAssignedEvent assig : assignedContainers) { Assert.assertTrue("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost())); } rm1.stop(); rm2.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Scheduling across heterogeneous nodes: h1 has only 1024 MB while h2/h3
     * have 10240 MB. Three requests are sent (2048 MB on h1/h2, 3000 MB on h1,
     * 2048 MB on h3) with differing createReq boolean flags — presumably
     * earlierFailed/reduce markers, confirm against the createReq helper defined
     * elsewhere in this file. After all nodes heartbeat, only event1 and event3
     * are expected assigned (checkAssignments, no host-match enforcement), and
     * no assignment may land on h1 since its capacity cannot fit any request.
     */
    @Test public void testMapReduceScheduling() throws Exception { LOG.info("Running testMapReduceScheduling"); Configuration conf=new Configuration(); MyResourceManager rm=new MyResourceManager(conf); rm.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm.getRMContext().getDispatcher(); RMApp app=rm.submitApp(1024); dispatcher.await(); MockNM amNodeManager=rm.registerNode("amNM:1234",2048); amNodeManager.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm.sendAMLaunched(appAttemptId); dispatcher.await(); JobId jobId=MRBuilderUtils.newJobId(appAttemptId.getApplicationId(),0); Job mockJob=mock(Job.class); when(mockJob.getReport()).thenReturn(MRBuilderUtils.newJobReport(jobId,"job","user",JobState.RUNNING,0,0,0,0,0,0,0,"jobfile",null,false,"")); MyContainerAllocator allocator=new MyContainerAllocator(rm,conf,appAttemptId,mockJob); MockNM nodeManager1=rm.registerNode("h1:1234",1024); MockNM nodeManager2=rm.registerNode("h2:1234",10240); MockNM nodeManager3=rm.registerNode("h3:1234",10240); dispatcher.await(); ContainerRequestEvent event1=createReq(jobId,1,2048,new String[]{"h1","h2"},true,false); allocator.sendRequest(event1); ContainerRequestEvent event2=createReq(jobId,2,3000,new String[]{"h1"},false,true); allocator.sendRequest(event2); ContainerRequestEvent event3=createReq(jobId,3,2048,new String[]{"h3"},false,false); allocator.sendRequest(event3); List assigned=allocator.schedule(); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,assigned.size()); nodeManager1.nodeHeartbeat(true); nodeManager2.nodeHeartbeat(true); nodeManager3.nodeHeartbeat(true); dispatcher.await(); assigned=allocator.schedule(); dispatcher.await(); checkAssignments(new ContainerRequestEvent[]{event1,event3},assigned,false); for ( TaskAttemptContainerAssignedEvent assig : assigned) { Assert.assertFalse("Assigned count not correct","h1".equals(assig.getContainer().getNodeId().getHost())); } }

    Class: org.apache.hadoop.mapreduce.v2.app.speculate.TestDataStatistics

    InternalCallVerifier EqualityVerifier 
    /**
     * Statistics seeded with exactly one value: count is 1, the mean equals the
     * seed, variance and standard deviation are zero, and any outlier estimate
     * collapses to the seed itself.
     */
    @Test public void testSingleEntryDataStatistics() throws Exception {
      final double seed = 17.29;
      DataStatistics stats = new DataStatistics(seed);
      Assert.assertEquals(1, stats.count(), TOL);
      Assert.assertEquals(seed, stats.mean(), TOL);
      Assert.assertEquals(0, stats.var(), TOL);
      Assert.assertEquals(0, stats.std(), TOL);
      Assert.assertEquals(seed, stats.outlier(1.0f), TOL);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A default-constructed (empty) statistics object reports zero for every
     * derived quantity: count, mean, variance, standard deviation and outlier.
     */
    @Test public void testEmptyDataStatistics() throws Exception {
      DataStatistics empty = new DataStatistics();
      Assert.assertEquals(0, empty.count(), TOL);
      Assert.assertEquals(0, empty.mean(), TOL);
      Assert.assertEquals(0, empty.var(), TOL);
      Assert.assertEquals(0, empty.std(), TOL);
      Assert.assertEquals(0, empty.outlier(1.0f), TOL);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * updateStatistics(old, new) replaces an existing sample in place: after
     * seeding with 17 and adding 29 (count 2, mean 23, var 36), swapping the
     * 17 for a 29 keeps the count at 2 but moves the mean to 29 and the
     * variance to 0.
     */
    @Test public void testUpdateStatistics() throws Exception {
      DataStatistics stats = new DataStatistics(17);
      stats.add(29);
      Assert.assertEquals(2, stats.count(), TOL);
      Assert.assertEquals(23.0, stats.mean(), TOL);
      Assert.assertEquals(36.0, stats.var(), TOL);

      // Replace the 17 sample with 29; both samples are now 29.
      stats.updateStatistics(17, 29);
      Assert.assertEquals(2, stats.count(), TOL);
      Assert.assertEquals(29.0, stats.mean(), TOL);
      Assert.assertEquals(0.0, stats.var(), TOL);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Two samples (17 and 29) accumulated via add(): count 2, mean 23,
     * variance 36, standard deviation 6, and a one-sigma outlier of 29.
     * NOTE(review): method name has a typo ("Muti") — kept, since renaming a
     * test changes its externally visible identifier.
     */
    @Test public void testMutiEntryDataStatistics() throws Exception {
      DataStatistics stats = new DataStatistics();
      stats.add(17);
      stats.add(29);
      Assert.assertEquals(2, stats.count(), TOL);
      Assert.assertEquals(23.0, stats.mean(), TOL);
      Assert.assertEquals(36.0, stats.var(), TOL);
      Assert.assertEquals(6.0, stats.std(), TOL);
      Assert.assertEquals(29.0, stats.outlier(1.0f), TOL);
    }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebApp

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * For both HTTP and HTTPS policies, a direct request to the AM's /mapreduce
     * endpoint must be redirected (302 MOVED_TEMPORARILY) to the configured web
     * proxy address, with the Location header equal to
     * scheme + proxyAddress + ProxyUriUtils.getPath(appId, "/mapreduce").
     * The TestAMFilterInitializer http filter is installed via configuration;
     * each iteration runs a full MRApp job to completion afterwards.
     */
    @Test public void testMRWebAppRedirection() throws Exception { String[] schemePrefix={WebAppUtils.HTTP_PREFIX,WebAppUtils.HTTPS_PREFIX}; for ( String scheme : schemePrefix) { MRApp app=new MRApp(2,2,true,this.getClass().getName(),true){ @Override protected ClientService createClientService( AppContext context){ return new MRClientService(context); } } ; Configuration conf=new Configuration(); conf.set(YarnConfiguration.PROXY_ADDRESS,"9.9.9.9"); conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,scheme.equals(WebAppUtils.HTTPS_PREFIX) ? Policy.HTTPS_ONLY.name() : Policy.HTTP_ONLY.name()); webProxyBase="/proxy/" + app.getAppID(); conf.set("hadoop.http.filter.initializers",TestAMFilterInitializer.class.getName()); Job job=app.submit(conf); String hostPort=NetUtils.getHostPortString(((MRClientService)app.getClientService()).getWebApp().getListenerAddress()); URL httpUrl=new URL("http://" + hostPort + "/mapreduce"); HttpURLConnection conn=(HttpURLConnection)httpUrl.openConnection(); conn.setInstanceFollowRedirects(false); conn.connect(); String expectedURL=scheme + conf.get(YarnConfiguration.PROXY_ADDRESS) + ProxyUriUtils.getPath(app.getAppID(),"/mapreduce"); Assert.assertEquals(expectedURL,conn.getHeaderField(HttpHeaders.LOCATION)); Assert.assertEquals(HttpStatus.SC_MOVED_TEMPORARILY,conn.getResponseCode()); app.waitForState(job,JobState.SUCCEEDED); app.verifyCompleted(); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * After AppController.index() runs, the controller's APP_ID property must
     * equal the application id of the injected mock context.
     */
    @Test public void testAppControllerIndex(){
      AppContext appCtx = new MockAppContext(0, 1, 1, 1);
      Injector inj = WebAppTests.createMockInjector(AppContext.class, appCtx);
      AppController ac = inj.getInstance(AppController.class);
      ac.index();
      assertEquals(appCtx.getApplicationID().toString(), ac.get(APP_ID, ""));
    }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServices

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting the AM root resource with an Accept type the service cannot
     * produce (text/plain) must fail: the client raises
     * UniformInterfaceException carrying an INTERNAL_SERVER_ERROR response and
     * no response body is ever captured.
     */
    @Test public void testInvalidAccept() throws JSONException, Exception {
      WebResource web = resource();
      String body = "";
      try {
        body = web.path("ws").path("v1").path("mapreduce")
            .accept(MediaType.TEXT_PLAIN).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.INTERNAL_SERVER_ERROR, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", body);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/ with no Accept header defaults to JSON: the response
     * holds a single "info" element, validated via verifyAMInfo.
     */
    @Test public void testAMDefault() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce/")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyAMInfo(payload.getJSONObject("info"), appContext);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * GET ws/v1/invalid must 404: the client raises UniformInterfaceException
     * with NOT_FOUND and no response body is captured.
     */
    @Test public void testInvalidUri2() throws JSONException, Exception {
      WebResource web = resource();
      String body = "";
      try {
        body = web.path("ws").path("v1").path("invalid")
            .accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", body);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/blacklistednodes as XML: content type must be XML and
     * the payload is validated against the app context via
     * verifyBlacklistedNodesInfoXML.
     */
    @Test public void testBlacklistedNodesXML() throws Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("blacklistednodes").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xmlBody = resp.getEntity(String.class);
      verifyBlacklistedNodesInfoXML(xmlBody, appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/info/ (trailing slash) as JSON: one "info" element,
     * validated via verifyAMInfo.
     */
    @Test public void testInfoSlash() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("info/").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyAMInfo(payload.getJSONObject("info"), appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/info/ with no Accept header defaults to JSON: one
     * "info" element, validated via verifyAMInfo.
     */
    @Test public void testInfoDefault() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("info/").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyAMInfo(payload.getJSONObject("info"), appContext);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * GET ws/v1/mapreduce/bogus must 404: the client raises
     * UniformInterfaceException with NOT_FOUND and no body is captured.
     */
    @Test public void testInvalidUri() throws JSONException, Exception {
      WebResource web = resource();
      String body = "";
      try {
        body = web.path("ws").path("v1").path("mapreduce").path("bogus")
            .accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", body);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce as XML: content type must be XML and the payload is
     * validated via verifyAMInfoXML.
     */
    @Test public void testAMXML() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xmlBody = resp.getEntity(String.class);
      verifyAMInfoXML(xmlBody, appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/info/ as XML: content type must be XML and the
     * payload is validated via verifyAMInfoXML.
     */
    @Test public void testInfoXML() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("info/").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xmlBody = resp.getEntity(String.class);
      verifyAMInfoXML(xmlBody, appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/blacklistednodes as JSON: one top-level element,
     * validated against the app context via verifyBlacklistedNodesInfo.
     */
    @Test public void testBlacklistedNodes() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("blacklistednodes").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyBlacklistedNodesInfo(payload, appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/ (trailing slash) as JSON: one "info" element,
     * validated via verifyAMInfo.
     */
    @Test public void testAMSlash() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyAMInfo(payload.getJSONObject("info"), appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce/info as JSON: one "info" element, validated via
     * verifyAMInfo.
     */
    @Test public void testInfo() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("info").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyAMInfo(payload.getJSONObject("info"), appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/mapreduce as JSON: one "info" element, validated via
     * verifyAMInfo.
     */
    @Test public void testAM() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject payload = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, payload.length());
      verifyAMInfo(payload.getJSONObject("info"), appContext);
    }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesAttempts

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * For every attempt of every task of every job, GET
     * ws/v1/mapreduce/jobs/{jobId}/tasks/{tid}/attempts/{attid}/ (trailing
     * slash) as JSON must return a single "taskAttempt" element, validated via
     * verifyAMTaskAttempt against the live attempt and task type.
     */
    @Test public void testTaskAttemptIdSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("taskAttempt"); verifyAMTaskAttempt(info,att,task.getType()); } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * For every task of every job, GET
     * ws/v1/mapreduce/jobs/{jobId}/tasks/{tid}/attempts/ (trailing slash) as
     * JSON must return the attempt list, validated via verifyAMTaskAttempts.
     */
    @Test public void testTaskAttemptsSlash() throws JSONException, Exception {
      WebResource web = resource();
      Map jobsMap = appContext.getAllJobs();
      for ( JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for ( Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
              .path("jobs").path(jobId).path("tasks").path(tid)
              .path("attempts/").accept(MediaType.APPLICATION_JSON)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
          JSONObject payload = resp.getEntity(JSONObject.class);
          verifyAMTaskAttempts(payload, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * For every attempt of every task of every job, GET
     * ws/v1/mapreduce/jobs/{jobId}/tasks/{tid}/attempts/{attid} as XML must
     * return a parseable document whose "taskAttempt" elements all validate via
     * verifyAMTaskAttemptXML against the live attempt and task type.
     */
    @Test public void testTaskAttemptIdXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("taskAttempt"); for (int i=0; i < nodes.getLength(); i++) { Element element=(Element)nodes.item(i); verifyAMTaskAttemptXML(element,att,task.getType()); } } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * For every task of every job, GET
     * ws/v1/mapreduce/jobs/{jobId}/tasks/{tid}/attempts as XML must return a
     * document with exactly one "taskAttempts" wrapper whose "taskAttempt"
     * children validate via verifyAMTaskAttemptsXML.
     */
    @Test public void testTaskAttemptsXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList attempts=dom.getElementsByTagName("taskAttempts"); assertEquals("incorrect number of elements",1,attempts.getLength()); NodeList nodes=dom.getElementsByTagName("taskAttempt"); verifyAMTaskAttemptsXML(nodes,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON listing of all attempts for every task of every job. */
    @Test
    public void testTaskAttempts() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
              .path("jobs").path(jobId).path("tasks").path(tid)
              .path("attempts").accept(MediaType.APPLICATION_JSON)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          verifyAMTaskAttempts(json, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the attempts listing defaults to JSON when no Accept
     * header is sent.
     */
    @Test
    public void testTaskAttemptsDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
              .path("jobs").path(jobId).path("tasks").path(tid)
              .path("attempts").get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          verifyAMTaskAttempts(json, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON counters resource of each individual task attempt. */
    @Test
    public void testTaskAttemptIdCounters() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          for (TaskAttempt att : task.getAttempts().values()) {
            TaskAttemptId attemptid = att.getID();
            String attid = MRApps.toString(attemptid);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce")
                .path("jobs").path(jobId).path("tasks").path(tid)
                .path("attempts").path(attid).path("counters")
                .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
            verifyAMJobTaskAttemptCounters(info, att);
          }
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the XML counters resource of each individual task attempt. */
    @Test
    public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          for (TaskAttempt att : task.getAttempts().values()) {
            TaskAttemptId attemptid = att.getID();
            String attid = MRApps.toString(attemptid);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce")
                .path("jobs").path(jobId).path("tasks").path(tid)
                .path("attempts").path(attid).path("counters")
                .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
            String xml = response.getEntity(String.class);
            DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
            DocumentBuilder db = dbf.newDocumentBuilder();
            InputSource is = new InputSource();
            is.setCharacterStream(new StringReader(xml));
            Document dom = db.parse(is);
            NodeList nodes = dom.getElementsByTagName("jobTaskAttemptCounters");
            verifyAMTaskCountersXML(nodes, att);
          }
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON representation of each individual task attempt. */
    @Test
    public void testTaskAttemptId() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          for (TaskAttempt att : task.getAttempts().values()) {
            TaskAttemptId attemptid = att.getID();
            String attid = MRApps.toString(attemptid);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce")
                .path("jobs").path(jobId).path("tasks").path(tid)
                .path("attempts").path(attid)
                .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("taskAttempt");
            verifyAMTaskAttempt(info, att, task.getType());
          }
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the single-attempt resource defaults to JSON when no
     * Accept header is sent.
     */
    @Test
    public void testTaskAttemptIdDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          for (TaskAttempt att : task.getAttempts().values()) {
            TaskAttemptId attemptid = att.getID();
            String attid = MRApps.toString(attemptid);
            ClientResponse response = r.path("ws").path("v1").path("mapreduce")
                .path("jobs").path(jobId).path("tasks").path(tid)
                .path("attempts").path(attid).get(ClientResponse.class);
            assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
            JSONObject json = response.getEntity(JSONObject.class);
            assertEquals("incorrect number of elements", 1, json.length());
            JSONObject info = json.getJSONObject("taskAttempt");
            verifyAMTaskAttempt(info, att, task.getType());
          }
        }
      }
    }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobConf

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the job conf resource defaults to JSON with no Accept header. */
    @Test
    public void testJobConfDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("conf").get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("conf");
        verifyAMJobConf(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON job conf resource for every known job. */
    @Test
    public void testJobConf() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("conf")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("conf");
        verifyAMJobConf(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the job conf resource also resolves with a trailing slash. */
    @Test
    public void testJobConfSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("conf/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("conf");
        verifyAMJobConf(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the XML job conf resource for every known job. */
    @Test
    public void testJobConfXML() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("conf")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList info = dom.getElementsByTagName("conf");
        verifyAMJobConfXML(info, jobsMap.get(id));
      }
    }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesJobs

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the job counters resource defaults to JSON with no Accept header. */
    @Test
    public void testJobCountersDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("counters/")
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobCounters");
        verifyAMJobCounters(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON job attempts resource for every known job. */
    @Test
    public void testJobAttempts() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("jobattempts")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobAttempts");
        verifyJobAttempts(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the per-job resource also resolves with a trailing slash. */
    @Test
    public void testJobIdSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId + "/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("job");
        verifyAMJob(info, jobsMap.get(id));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a malformed job id with an XML Accept header must produce a
     * 404 whose XML RemoteException body describes the failure.
     */
    @Test
    public void testJobIdInvalidXML() throws JSONException, Exception {
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("mapreduce").path("jobs")
            .path("job_foo").accept(MediaType.APPLICATION_XML)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException e) {
        ClientResponse resp = e.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
        String body = resp.getEntity(String.class);
        System.out.println(body);
        // Pull the RemoteException fields out of the XML error document.
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        DocumentBuilder builder = factory.newDocumentBuilder();
        InputSource source = new InputSource();
        source.setCharacterStream(new StringReader(body));
        Document doc = builder.parse(source);
        NodeList found = doc.getElementsByTagName("RemoteException");
        Element remoteException = (Element) found.item(0);
        verifyJobIdInvalid(
            WebServicesTestUtils.getXmlString(remoteException, "message"),
            WebServicesTestUtils.getXmlString(remoteException, "exception"),
            WebServicesTestUtils.getXmlString(remoteException, "javaClassName"));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a malformed job id as JSON must produce a 404 whose JSON
     * RemoteException body describes the failure.
     */
    @Test
    public void testJobIdInvalid() throws JSONException, Exception {
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("mapreduce").path("jobs")
            .path("job_foo").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException e) {
        ClientResponse resp = e.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteException = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3,
            remoteException.length());
        verifyJobIdInvalid(remoteException.getString("message"),
            remoteException.getString("exception"),
            remoteException.getString("javaClassName"));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the XML job counters resource for every known job. */
    @Test
    public void testJobCountersXML() throws Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("counters")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList info = dom.getElementsByTagName("jobCounters");
        verifyAMJobCountersXML(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the job attempts resource defaults to JSON with no Accept header. */
    @Test
    public void testJobAttemptsDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("jobattempts")
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobAttempts");
        verifyJobAttempts(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Fetches the jobs collection as XML and verifies there is exactly one
     * <jobs> wrapper containing exactly one <job> element.
     */
    @Test
    public void testJobsXML() throws Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("jobs").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String body = resp.getEntity(String.class);
      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
      DocumentBuilder builder = factory.newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(body));
      Document doc = builder.parse(source);
      NodeList jobsNodes = doc.getElementsByTagName("jobs");
      assertEquals("incorrect number of elements", 1, jobsNodes.getLength());
      NodeList jobNodes = doc.getElementsByTagName("job");
      assertEquals("incorrect number of elements", 1, jobNodes.getLength());
      verifyAMJobXML(jobNodes, appContext);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON job counters resource for every known job. */
    @Test
    public void testJobCounters() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("counters")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobCounters");
        verifyAMJobCounters(info, jobsMap.get(id));
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Verifies the jobs collection resource also resolves with a trailing slash. */
    @Test
    public void testJobsSlash() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("jobs/").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray jobArray = body.getJSONObject("jobs").getJSONArray("job");
      JSONObject first = jobArray.getJSONObject(0);
      // Look up the same job in the app context and cross-check the fields.
      Job expected = appContext.getJob(MRApps.toJobID(first.getString("id")));
      verifyAMJob(first, expected);
    }

    InternalCallVerifier EqualityVerifier 
    /** Verifies the jobs collection defaults to JSON when no Accept header is sent. */
    @Test
    public void testJobsDefault() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("jobs").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray jobArray = body.getJSONObject("jobs").getJSONArray("job");
      JSONObject first = jobArray.getJSONObject(0);
      // Look up the same job in the app context and cross-check the fields.
      Job expected = appContext.getJob(MRApps.toJobID(first.getString("id")));
      verifyAMJob(first, expected);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the per-job resource defaults to JSON with no Accept header. */
    @Test
    public void testJobIdDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("job");
        verifyAMJob(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the job counters resource also resolves with a trailing slash. */
    @Test
    public void testJobCountersSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("counters/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobCounters");
        verifyAMJobCounters(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the XML job attempts resource for every known job. */
    @Test
    public void testJobAttemptsXML() throws Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("jobattempts")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList attempts = dom.getElementsByTagName("jobAttempts");
        assertEquals("incorrect number of elements", 1, attempts.getLength());
        NodeList info = dom.getElementsByTagName("jobAttempt");
        verifyJobAttemptsXML(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the JSON per-job resource for every known job. */
    @Test
    public void testJobId() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("job");
        verifyAMJob(info, jobsMap.get(id));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a malformed job id with no Accept header must produce a 404
     * whose default (JSON) RemoteException body describes the failure.
     */
    @Test
    public void testJobIdInvalidDefault() throws JSONException, Exception {
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("mapreduce").path("jobs")
            .path("job_foo").get(JSONObject.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException e) {
        ClientResponse resp = e.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteException = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3,
            remoteException.length());
        verifyJobIdInvalid(remoteException.getString("message"),
            remoteException.getString("exception"),
            remoteException.getString("javaClassName"));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a well-formed but unknown job id must produce a 404 with a
     * NotFoundException RemoteException body.
     */
    @Test
    public void testJobIdNonExist() throws JSONException, Exception {
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("mapreduce").path("jobs")
            .path("job_0_1234").get(JSONObject.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException e) {
        ClientResponse resp = e.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteException = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3,
            remoteException.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: job, job_0_1234, is not found",
            remoteException.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "NotFoundException", remoteException.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.NotFoundException",
            remoteException.getString("javaClassName"));
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Fetches the jobs collection as JSON and cross-checks the first job. */
    @Test
    public void testJobs() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("mapreduce")
          .path("jobs").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray jobArray = body.getJSONObject("jobs").getJSONArray("job");
      JSONObject first = jobArray.getJSONObject(0);
      // Look up the same job in the app context and cross-check the fields.
      Job expected = appContext.getJob(MRApps.toJobID(first.getString("id")));
      verifyAMJob(first, expected);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the job attempts resource also resolves with a trailing slash. */
    @Test
    public void testJobAttemptsSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("jobattempts/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobAttempts");
        verifyJobAttempts(info, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the XML per-job resource for every known job. */
    @Test
    public void testJobIdXML() throws Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).accept(MediaType.APPLICATION_XML)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList job = dom.getElementsByTagName("job");
        verifyAMJobXML(job, appContext);
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a completely bogus job id string must produce a 404 with a
     * NotFoundException RemoteException body describing the bad format.
     */
    @Test
    public void testJobIdInvalidBogus() throws JSONException, Exception {
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("mapreduce").path("jobs")
            .path("bogusfoo").get(JSONObject.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException e) {
        ClientResponse resp = e.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteException = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3,
            remoteException.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: JobId string : bogusfoo is not properly formed",
            remoteException.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "NotFoundException", remoteException.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.NotFoundException",
            remoteException.getString("javaClassName"));
      }
    }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAMWebServicesTasks

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the task counters resource also resolves with a trailing slash. */
    @Test
    public void testTaskIdCountersSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
              .path("jobs").path(jobId).path("tasks").path(tid)
              .path("counters/").accept(MediaType.APPLICATION_JSON)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("jobTaskCounters");
          verifyAMJobTaskCounters(info, task);
        }
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A task id with an invalid task-type character ("d") must produce a 404
     * with a NotFoundException RemoteException body.
     */
    @Test
    public void testTaskIdInvalid() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "task_0_0000_d_000000";
        try {
          r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
              .path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: Bad TaskType identifier. TaskId string : "
                  + "task_0_0000_d_000000 is not properly formed.", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies the per-task resource also resolves with a trailing slash. */
    @Test
    public void testTaskIdSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
              .path("jobs").path(jobId).path("tasks").path(tid + "/")
              .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("task");
          verifyAMSingleTask(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Verifies filtering the tasks listing with the type=m (map) query param. */
    @Test
    public void testTasksQueryMap() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String type = "m";
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").queryParam("type", type)
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject tasks = json.getJSONObject("tasks");
        JSONArray arr = tasks.getJSONArray("task");
        assertEquals("incorrect number of elements", 1, arr.length());
        verifyAMTask(arr, jobsMap.get(id), type);
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A completely bogus task id string must produce a 404 with a
     * NotFoundException RemoteException body describing the bad format.
     */
    @Test
    public void testTaskIdBogus() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "bogustaskid";
        try {
          r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
              .path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: TaskId string : "
                  + "bogustaskid is not properly formed", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * An unrecognized tasks type query value ("reduce" instead of "r") must
     * produce a 400 with a BadRequestException RemoteException body.
     */
    @Test
    public void testTasksQueryInvalid() throws JSONException, Exception {
      WebResource r = resource();
      // Typed map restores compile-time safety lost by the raw Map declaration.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        // "reduce" is invalid; the service only accepts "m" or "r".
        String tasktype = "reduce";
        try {
          r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
              .path("tasks").queryParam("type", tasktype)
              .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: tasktype must be either m or r", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "BadRequestException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
        }
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** A well-formed task id that does not exist in the job must yield 404 with a "task not found" NotFoundException message. */
    @Test public void testTaskIdNonExist() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); String tid="task_0_0000_m_000000"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: task not found with id task_0_0000_m_000000",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GETs each task's counters as XML, parses the payload with a DOM builder, and delegates element checks to verifyAMTaskCountersXML. */
    @Test public void testJobTaskCountersXML() throws Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList info=dom.getElementsByTagName("jobTaskCounters"); verifyAMTaskCountersXML(info,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GETs the task list as XML for each job, expects exactly one "tasks" element, and delegates per-task checks to verifyAMTaskXML. */
    @Test public void testTasksXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList tasks=dom.getElementsByTagName("tasks"); assertEquals("incorrect number of elements",1,tasks.getLength()); NodeList task=dom.getElementsByTagName("task"); verifyAMTaskXML(task,jobsMap.get(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GETs each task's counters as JSON (explicit Accept header) and delegates the "jobTaskCounters" object to verifyAMJobTaskCounters. */
    @Test public void testTaskIdCounters() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobTaskCounters"); verifyAMJobTaskCounters(info,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GETs the JSON task list for each job; expects a 2-element "task" array and delegates checks to verifyAMTask (no type filter). */
    @Test public void testTasks() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject tasks=json.getJSONObject("tasks"); JSONArray arr=tasks.getJSONArray("task"); assertEquals("incorrect number of elements",2,arr.length()); verifyAMTask(arr,jobsMap.get(id),null); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Same as testTasks but with a trailing slash on the "tasks/" path segment — the resource must behave identically. */
    @Test public void testTasksSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject tasks=json.getJSONObject("tasks"); JSONArray arr=tasks.getJSONArray("task"); assertEquals("incorrect number of elements",2,arr.length()); verifyAMTask(arr,jobsMap.get(id),null); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Same as testTasks but with no Accept header — the service must default to a JSON response. */
    @Test public void testTasksDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject tasks=json.getJSONObject("tasks"); JSONArray arr=tasks.getJSONArray("task"); assertEquals("incorrect number of elements",2,arr.length()); verifyAMTask(arr,jobsMap.get(id),null); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GETs each individual task as JSON and delegates the single "task" object to verifyAMSingleTask. */
    @Test public void testTaskId() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("task"); verifyAMSingleTask(info,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Same as testTaskId but without an Accept header — the single-task resource must default to JSON. */
    @Test public void testTaskIdDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("task"); verifyAMSingleTask(info,task); } } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** A task id missing its trailing sequence number ("task_0_0000_m") must yield 404 with a "not properly formed" NotFoundException. */
    @Test public void testTaskIdInvalid3() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); String tid="task_0_0000_m"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: TaskId string : " + "task_0_0000_m is not properly formed",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Same as testTaskIdCounters but without an Accept header — the counters resource must default to JSON. */
    @Test public void testTaskIdCountersDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("counters").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobTaskCounters"); verifyAMJobTaskCounters(info,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Filters the task list with type=r; expects exactly one reduce task in the "task" array and delegates checks to verifyAMTask. */
    @Test public void testTasksQueryReduce() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); String type="r"; ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").queryParam("type",type).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject tasks=json.getJSONObject("tasks"); JSONArray arr=tasks.getJSONArray("task"); assertEquals("incorrect number of elements",1,arr.length()); verifyAMTask(arr,jobsMap.get(id),type); } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** A task id missing a segment ("task_0_m_000000") must yield 404 with a "not properly formed" NotFoundException. */
    @Test public void testTaskIdInvalid2() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); String tid="task_0_m_000000"; try { r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: TaskId string : " + "task_0_m_000000 is not properly formed",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GETs each individual task as XML, parses the payload, and checks every "task" element via verifyAMSingleTaskXML. */
    @Test public void testTaskIdXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("task"); for (int i=0; i < nodes.getLength(); i++) { Element element=(Element)nodes.item(i); verifyAMSingleTaskXML(element,task); } } } }

    Class: org.apache.hadoop.mapreduce.v2.app.webapp.TestAppController

    InternalCallVerifier EqualityVerifier 
    /**
     * Test method 'singleJobCounter'. Should select the SingleCounterPage
     * class for rendering.
     */
    @Test
    public void testGetSingleJobCounter() throws IOException {
      appController.singleJobCounter();
      assertEquals(SingleCounterPage.class, appController.getClazz());
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test method 'singleTaskCounter'. Should select the SingleCounterPage
     * class for rendering and populate the counter group/name properties.
     */
    @Test
    public void testGetSingleTaskCounter() throws IOException {
      appController.singleTaskCounter();
      assertEquals(SingleCounterPage.class, appController.getClazz());
      assertNotNull(appController.getProperty().get(AppController.COUNTER_GROUP));
      assertNotNull(appController.getProperty().get(AppController.COUNTER_NAME));
    }

    InternalCallVerifier EqualityVerifier 
    // NOTE: the test controller accumulates rendered output, so the second assertEquals
    // expects the first access-denied message concatenated with the missing-job-ID message.
    /** * Test method 'job'. Should print message about error or set JobPage class for rendering */ @Test public void testGetJob(){ when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false); appController.job(); verify(appController.response()).setContentType(MimeType.TEXT); assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData()); when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true); appController.getProperty().remove(AMParams.JOB_ID); appController.job(); assertEquals("Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",appController.getData()); appController.getProperty().put(AMParams.JOB_ID,"job_01_01"); appController.job(); assertEquals(JobPage.class,appController.getClazz()); }

    InternalCallVerifier EqualityVerifier 
    /** Test method 'tasks'. Should select the TasksPage class for rendering. */
    @Test
    public void testTasks() {
      appController.tasks();
      assertEquals(TasksPage.class, appController.getClazz());
    }

    InternalCallVerifier EqualityVerifier 
    // NOTE: getData() accumulates across calls, so the second assertion expects the
    // access-denied text followed immediately by "missing task ID".
    /** * Test method 'taskCounters'. Should print message about error or set CountersPage class for rendering */ @Test public void testGetTaskCounters(){ when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false); appController.taskCounters(); verify(appController.response()).setContentType(MimeType.TEXT); assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData()); when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true); appController.getProperty().remove(AMParams.TASK_ID); appController.taskCounters(); assertEquals("Access denied: User user does not have permission to view job job_01_01missing task ID",appController.getData()); appController.getProperty().put(AMParams.TASK_ID,"task_01_01_m01_01"); appController.taskCounters(); assertEquals(CountersPage.class,appController.getClazz()); }

    InternalCallVerifier EqualityVerifier 
    /** Test method 'conf'. Should select the JobConfPage class for rendering. */
    @Test
    public void testConfiguration() {
      appController.conf();
      assertEquals(JobConfPage.class, appController.getClazz());
    }

    InternalCallVerifier EqualityVerifier 
    // Walks attempts() through its error ladder: access denied -> missing task ID ->
    // missing task-type (title message) -> missing attempt-state (title message) ->
    // finally a successful render via AttemptsPage. Order of the property mutations matters.
    /** * Test method 'conf'. Should set AttemptsPage class for rendering or print information about error */ @Test public void testAttempts(){ appController.getProperty().remove(AMParams.TASK_TYPE); when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false); appController.attempts(); verify(appController.response()).setContentType(MimeType.TEXT); assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData()); when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true); appController.getProperty().remove(AMParams.TASK_ID); appController.attempts(); assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData()); appController.getProperty().put(AMParams.TASK_ID,"task_01_01_m01_01"); appController.attempts(); assertEquals("Bad request: missing task-type.",appController.getProperty().get("title")); appController.getProperty().put(AMParams.TASK_TYPE,"m"); appController.attempts(); assertEquals("Bad request: missing attempt-state.",appController.getProperty().get("title")); appController.getProperty().put(AMParams.ATTEMPT_STATE,"State"); appController.attempts(); assertEquals(AttemptsPage.class,appController.getClazz()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Test method 'task'. Should select the TaskPage class for rendering and
     * set the page title to the attempts heading.
     */
    @Test
    public void testTask() {
      appController.task();
      assertEquals("Attempts for task_01_01_m01_01",
          appController.getProperty().get("title"));
      assertEquals(TaskPage.class, appController.getClazz());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Checks the ResponseInfo items emitted by info() in order: app id, app name, user,
    // start time, elapsed time. NOTE(review): "Elasped: " appears misspelled but must match
    // the exact key produced by the controller under test — confirm against AppController
    // before "fixing" it here.
    /** * Test the method 'info'. */ @Test public void testInfo(){ appController.info(); Iterator iterator=appController.getResponseInfo().iterator(); ResponseInfo.Item item=iterator.next(); assertEquals("Application ID:",item.key); assertEquals("application_0_0000",item.value); item=iterator.next(); assertEquals("Application Name:",item.key); assertEquals("AppName",item.value); item=iterator.next(); assertEquals("User:",item.key); assertEquals("User",item.value); item=iterator.next(); assertEquals("Started on:",item.key); item=iterator.next(); assertEquals("Elasped: ",item.key); }

    InternalCallVerifier EqualityVerifier 
    // NOTE: getData() accumulates across calls, so the second assertion expects the
    // access-denied text followed immediately by "Bad Request: Missing job ID".
    /** * Test method 'jobCounters'. Should print message about error or set CountersPage class for rendering */ @Test public void testGetJobCounters(){ when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(false); appController.jobCounters(); verify(appController.response()).setContentType(MimeType.TEXT); assertEquals("Access denied: User user does not have permission to view job job_01_01",appController.getData()); when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true); appController.getProperty().remove(AMParams.JOB_ID); appController.jobCounters(); assertEquals("Access denied: User user does not have permission to view job job_01_01Bad Request: Missing job ID",appController.getData()); appController.getProperty().put(AMParams.JOB_ID,"job_01_01"); appController.jobCounters(); assertEquals(CountersPage.class,appController.getClazz()); }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestCompletedTask

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercise CompletedTaskAttempt accessors: rack name, phase, finished flag,
     * shuffle/sort finish times and shuffle port, all sourced from a mocked
     * TaskAttemptInfo for a reduce task attempt.
     */
    @Test(timeout=5000)
    public void testCompletedTaskAttempt() {
      TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
      when(attemptInfo.getRackname()).thenReturn("Rackname");
      when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
      when(attemptInfo.getSortFinishTime()).thenReturn(12L);
      when(attemptInfo.getShufflePort()).thenReturn(10);
      JobID jobId = new JobID("12345", 0);
      TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
      TaskAttemptID attemptId = new TaskAttemptID(taskId, 0);
      when(attemptInfo.getAttemptId()).thenReturn(attemptId);
      // No parent task is needed for these accessors, hence the null first argument.
      CompletedTaskAttempt attempt = new CompletedTaskAttempt(null, attemptInfo);
      assertEquals("Rackname", attempt.getNodeRackName());
      assertEquals(Phase.CLEANUP, attempt.getPhase());
      assertTrue(attempt.isFinished());
      assertEquals(11L, attempt.getShuffleFinishTime());
      assertEquals(12L, attempt.getSortFinishTime());
      assertEquals(10, attempt.getShufflePort());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestHistoryServerFileSystemStateStoreService

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // Injects an IOException (via a spied FileSystem that fails rename of "update*" temp
    // files) into updateToken(), then restarts the store and verifies that despite the
    // failed rename the token's new renew date was persisted and is recovered.
    @Test public void testUpdatedTokenRecovery() throws IOException { IOException intentionalErr=new IOException("intentional error"); FileSystem fs=FileSystem.getLocal(conf); final FileSystem spyfs=spy(fs); ArgumentMatcher updateTmpMatcher=new ArgumentMatcher(){ @Override public boolean matches( Object argument){ if (argument instanceof Path) { return ((Path)argument).getName().startsWith("update"); } return false; } } ; doThrow(intentionalErr).when(spyfs).rename(argThat(updateTmpMatcher),isA(Path.class)); conf.set(JHAdminConfig.MR_HS_FS_STATE_STORE_URI,testDir.getAbsoluteFile().toURI().toString()); HistoryServerStateStoreService store=new HistoryServerFileSystemStateStoreService(){ @Override FileSystem createFileSystem() throws IOException { return spyfs; } } ; store.init(conf); store.start(); final MRDelegationTokenIdentifier token1=new MRDelegationTokenIdentifier(new Text("tokenOwner1"),new Text("tokenRenewer1"),new Text("tokenUser1")); token1.setSequenceNumber(1); final Long tokenDate1=1L; store.storeToken(token1,tokenDate1); final Long newTokenDate1=975318642L; try { store.updateToken(token1,newTokenDate1); fail("intentional error not thrown"); } catch ( IOException e) { assertEquals(intentionalErr,e); } store.close(); store=createAndStartStore(); HistoryServerState state=store.loadState(); assertEquals("incorrect loaded token count",1,state.tokenState.size()); assertTrue("missing token 1",state.tokenState.containsKey(token1)); assertEquals("incorrect token 1 date",newTokenDate1,state.tokenState.get(token1)); store.close(); }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJHSDelegationTokenSecretManager

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // End-to-end recovery test for the JHS delegation token secret manager: creates two
    // tokens, restarts the manager from the state store and checks keys/tokens/renew dates
    // survive; verifies sequence numbers continue after recovery; exercises cancelToken
    // authorization (short name vs. full Kerberos principal under the configured auth_to_local
    // rules); then recovers once more and confirms cancelled tokens are gone.
    @Test public void testRecovery() throws IOException { Configuration conf=new Configuration(); HistoryServerStateStoreService store=new HistoryServerMemStateStoreService(); store.init(conf); store.start(); JHSDelegationTokenSecretManagerForTest mgr=new JHSDelegationTokenSecretManagerForTest(store); mgr.startThreads(); MRDelegationTokenIdentifier tokenId1=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser")); Token token1=new Token(tokenId1,mgr); MRDelegationTokenIdentifier tokenId2=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser")); Token token2=new Token(tokenId2,mgr); DelegationKey[] keys=mgr.getAllKeys(); long tokenRenewDate1=mgr.getAllTokens().get(tokenId1).getRenewDate(); long tokenRenewDate2=mgr.getAllTokens().get(tokenId2).getRenewDate(); mgr.stopThreads(); mgr=new JHSDelegationTokenSecretManagerForTest(store); mgr.recover(store.loadState()); List recoveredKeys=Arrays.asList(mgr.getAllKeys()); for ( DelegationKey key : keys) { assertTrue("key missing after recovery",recoveredKeys.contains(key)); } assertTrue("token1 missing",mgr.getAllTokens().containsKey(tokenId1)); assertEquals("token1 renew date",tokenRenewDate1,mgr.getAllTokens().get(tokenId1).getRenewDate()); assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2)); assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate()); mgr.startThreads(); mgr.verifyToken(tokenId1,token1.getPassword()); mgr.verifyToken(tokenId2,token2.getPassword()); MRDelegationTokenIdentifier tokenId3=new MRDelegationTokenIdentifier(new Text("tokenOwner"),new Text("tokenRenewer"),new Text("tokenUser")); Token token3=new Token(tokenId3,mgr); assertEquals("sequence number restore",tokenId2.getSequenceNumber() + 1,tokenId3.getSequenceNumber()); mgr.cancelToken(token1,"tokenOwner"); MRDelegationTokenIdentifier tokenIdFull=new MRDelegationTokenIdentifier(new 
Text("tokenOwner/localhost@LOCALHOST"),new Text("tokenRenewer"),new Text("tokenUser")); KerberosName.setRules("RULE:[1:$1]\nRULE:[2:$1]"); Token tokenFull=new Token(tokenIdFull,mgr); try { mgr.cancelToken(tokenFull,"tokenOwner"); } catch ( AccessControlException ace) { assertTrue(ace.getMessage().contains("is not authorized to cancel the token")); } mgr.cancelToken(tokenFull,tokenIdFull.getOwner().toString()); long tokenRenewDate3=mgr.getAllTokens().get(tokenId3).getRenewDate(); mgr.stopThreads(); mgr=new JHSDelegationTokenSecretManagerForTest(store); mgr.recover(store.loadState()); assertFalse("token1 should be missing",mgr.getAllTokens().containsKey(tokenId1)); assertTrue("token2 missing",mgr.getAllTokens().containsKey(tokenId2)); assertEquals("token2 renew date",tokenRenewDate2,mgr.getAllTokens().get(tokenId2).getRenewDate()); assertTrue("token3 missing",mgr.getAllTokens().containsKey(tokenId3)); assertEquals("token3 renew date",tokenRenewDate3,mgr.getAllTokens().get(tokenId3).getRenewDate()); mgr.startThreads(); mgr.verifyToken(tokenId2,token2.getPassword()); mgr.verifyToken(tokenId3,token3.getPassword()); mgr.stopThreads(); }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistory

    InternalCallVerifier EqualityVerifier 
    // Verifies that the history cleaner honors refreshed retention settings: with a 24h
    // max age only the yesterday directory's file is deleted (first timeout-verified
    // delete); after refreshJobRetentionSettings() shrinks max age to 10s and the cleaner
    // interval to 1s, the remaining (200s-old) file is deleted too (second delete).
    // All filesystem interaction is mocked through a spied HistoryFileManager.
    @Test public void testRefreshJobRetentionSettings() throws IOException, InterruptedException { String root="mockfs://foo/"; String historyDoneDir=root + "mapred/history/done"; long now=System.currentTimeMillis(); long someTimeYesterday=now - (25l * 3600 * 1000); long timeBefore200Secs=now - (200l * 1000); String timestampComponent=JobHistoryUtils.timestampDirectoryComponent(someTimeYesterday); Path donePathYesterday=new Path(historyDoneDir,timestampComponent + "/" + "000000"); FileStatus dirCreatedYesterdayStatus=new FileStatus(0,true,0,0,someTimeYesterday,donePathYesterday); timestampComponent=JobHistoryUtils.timestampDirectoryComponent(timeBefore200Secs); Path donePathToday=new Path(historyDoneDir,timestampComponent + "/" + "000000"); FileStatus dirCreatedTodayStatus=new FileStatus(0,true,0,0,timeBefore200Secs,donePathToday); Path fileUnderYesterdayDir=new Path(donePathYesterday.toString(),"job_1372363578825_0015-" + someTimeYesterday + "-user-Sleep+job-"+ someTimeYesterday+ "-1-1-SUCCEEDED-default.jhist"); FileStatus fileUnderYesterdayDirStatus=new FileStatus(10,false,0,0,someTimeYesterday,fileUnderYesterdayDir); Path fileUnderTodayDir=new Path(donePathYesterday.toString(),"job_1372363578825_0016-" + timeBefore200Secs + "-user-Sleep+job-"+ timeBefore200Secs+ "-1-1-SUCCEEDED-default.jhist"); FileStatus fileUnderTodayDirStatus=new FileStatus(10,false,0,0,timeBefore200Secs,fileUnderTodayDir); HistoryFileManager historyManager=spy(new HistoryFileManager()); jobHistory=spy(new JobHistory()); List fileStatusList=new LinkedList(); fileStatusList.add(dirCreatedYesterdayStatus); fileStatusList.add(dirCreatedTodayStatus); doReturn(4).when(jobHistory).getInitDelaySecs(); doReturn(historyManager).when(jobHistory).createHistoryFileManager(); List list1=new LinkedList(); list1.add(fileUnderYesterdayDirStatus); doReturn(list1).when(historyManager).scanDirectoryForHistoryFiles(eq(donePathYesterday),any(FileContext.class)); List list2=new LinkedList(); 
list2.add(fileUnderTodayDirStatus); doReturn(list2).when(historyManager).scanDirectoryForHistoryFiles(eq(donePathToday),any(FileContext.class)); doReturn(fileStatusList).when(historyManager).getHistoryDirsForCleaning(Mockito.anyLong()); doReturn(true).when(historyManager).deleteDir(any(FileStatus.class)); JobListCache jobListCache=mock(JobListCache.class); HistoryFileInfo fileInfo=mock(HistoryFileInfo.class); doReturn(jobListCache).when(historyManager).createJobListCache(); when(jobListCache.get(any(JobId.class))).thenReturn(fileInfo); doNothing().when(fileInfo).delete(); Configuration conf=new Configuration(); conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,24l * 3600 * 1000); conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,2 * 1000); jobHistory.init(conf); jobHistory.start(); assertEquals(2 * 1000l,jobHistory.getCleanerInterval()); verify(fileInfo,timeout(20000).times(1)).delete(); fileStatusList.remove(dirCreatedYesterdayStatus); conf.setLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,10 * 1000); conf.setLong(JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,1 * 1000); doReturn(conf).when(jobHistory).createConf(); jobHistory.refreshJobRetentionSettings(); assertEquals(1 * 1000l,jobHistory.getCleanerInterval()); verify(fileInfo,timeout(20000).times(2)).delete(); }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEntities

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    // Checks CompletedJob completion-event pagination (map events monotonically ordered by
    // event id; offset/limit windows of the 12 total events) plus assorted job metadata
    // accessors (name, queue, progress, diagnostics, ACLs) loaded from the history file.
    /** * Simple test of some methods of CompletedJob * @throws Exception */ @Test(timeout=30000) public void testGetTaskAttemptCompletionEvent() throws Exception { HistoryFileInfo info=mock(HistoryFileInfo.class); when(info.getConfFile()).thenReturn(fullConfPath); completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager); TaskCompletionEvent[] events=completedJob.getMapAttemptCompletionEvents(0,1000); assertEquals(10,completedJob.getMapAttemptCompletionEvents(0,10).length); int currentEventId=0; for ( TaskCompletionEvent taskAttemptCompletionEvent : events) { int eventId=taskAttemptCompletionEvent.getEventId(); assertTrue(eventId >= currentEventId); currentEventId=eventId; } assertNull(completedJob.loadConfFile()); assertEquals("Sleep job",completedJob.getName()); assertEquals("default",completedJob.getQueueName()); assertEquals(1.0,completedJob.getProgress(),0.001); assertEquals(12,completedJob.getTaskAttemptCompletionEvents(0,1000).length); assertEquals(10,completedJob.getTaskAttemptCompletionEvents(0,10).length); assertEquals(7,completedJob.getTaskAttemptCompletionEvents(5,10).length); assertEquals(1,completedJob.getDiagnostics().size()); assertEquals("",completedJob.getDiagnostics().get(0)); assertEquals(0,completedJob.getJobACLs().size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Loads a CompletedJob from the history file and verifies per-type task maps
    // (10 maps, 2 reduces) plus state/report consistency for the first map and reduce tasks.
    @Test(timeout=10000) public void testCompletedTask() throws Exception { HistoryFileInfo info=mock(HistoryFileInfo.class); when(info.getConfFile()).thenReturn(fullConfPath); completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager); TaskId mt1Id=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP); TaskId rt1Id=MRBuilderUtils.newTaskId(jobId,0,TaskType.REDUCE); Map mapTasks=completedJob.getTasks(TaskType.MAP); Map reduceTasks=completedJob.getTasks(TaskType.REDUCE); assertEquals(10,mapTasks.size()); assertEquals(2,reduceTasks.size()); Task mt1=mapTasks.get(mt1Id); assertEquals(1,mt1.getAttempts().size()); assertEquals(TaskState.SUCCEEDED,mt1.getState()); TaskReport mt1Report=mt1.getReport(); assertEquals(TaskState.SUCCEEDED,mt1Report.getTaskState()); assertEquals(mt1Id,mt1Report.getTaskId()); Task rt1=reduceTasks.get(rt1Id); assertEquals(1,rt1.getAttempts().size()); assertEquals(TaskState.SUCCEEDED,rt1.getState()); TaskReport rt1Report=rt1.getReport(); assertEquals(TaskState.SUCCEEDED,rt1Report.getTaskState()); assertEquals(rt1Id,rt1Report.getTaskId()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies attempt-level data of a parsed completed job: attempt state,
     * assigned container address, node HTTP address, and that the attempt
     * report exposes the node-manager host/port/http-port, for one map attempt
     * and one reduce attempt.
     */
    @Test(timeout=10000) public void testCompletedTaskAttempt() throws Exception {
        HistoryFileInfo info=mock(HistoryFileInfo.class);
        when(info.getConfFile()).thenReturn(fullConfPath);
        completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager);
        TaskId mt1Id=MRBuilderUtils.newTaskId(jobId,0,TaskType.MAP);
        TaskId rt1Id=MRBuilderUtils.newTaskId(jobId,0,TaskType.REDUCE);
        TaskAttemptId mta1Id=MRBuilderUtils.newTaskAttemptId(mt1Id,0);
        TaskAttemptId rta1Id=MRBuilderUtils.newTaskAttemptId(rt1Id,0);
        Task mt1=completedJob.getTask(mt1Id);
        Task rt1=completedJob.getTask(rt1Id);
        // Map attempt 0: addresses come from the recorded history events.
        TaskAttempt mta1=mt1.getAttempt(mta1Id);
        assertEquals(TaskAttemptState.SUCCEEDED,mta1.getState());
        assertEquals("localhost:45454",mta1.getAssignedContainerMgrAddress());
        assertEquals("localhost:9999",mta1.getNodeHttpAddress());
        TaskAttemptReport mta1Report=mta1.getReport();
        assertEquals(TaskAttemptState.SUCCEEDED,mta1Report.getTaskAttemptState());
        assertEquals("localhost",mta1Report.getNodeManagerHost());
        assertEquals(45454,mta1Report.getNodeManagerPort());
        assertEquals(9999,mta1Report.getNodeManagerHttpPort());
        // Reduce attempt 0: identical expectations.
        TaskAttempt rta1=rt1.getAttempt(rta1Id);
        assertEquals(TaskAttemptState.SUCCEEDED,rta1.getState());
        assertEquals("localhost:45454",rta1.getAssignedContainerMgrAddress());
        assertEquals("localhost:9999",rta1.getNodeHttpAddress());
        TaskAttemptReport rta1Report=rta1.getReport();
        assertEquals(TaskAttemptState.SUCCEEDED,rta1Report.getTaskAttemptState());
        assertEquals("localhost",rta1Report.getNodeManagerHost());
        assertEquals(45454,rta1Report.getNodeManagerPort());
        assertEquals(9999,rta1Report.getNodeManagerHttpPort());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies job-level data of a parsed completed job: lazy/eager task
     * loading, AM infos, completed map/reduce counts, user, state and report.
     *
     * Fix: {@code assertEquals(true, …)} replaced with the idiomatic
     * {@code assertTrue(…)} (same assertion, clearer failure output).
     */
    @Test(timeout=100000) public void testCompletedJob() throws Exception {
        HistoryFileInfo info=mock(HistoryFileInfo.class);
        when(info.getConfFile()).thenReturn(fullConfPath);
        completedJob=new CompletedJob(conf,jobId,fulleHistoryPath,loadTasks,"user",info,jobAclsManager);
        // Tasks are loaded eagerly or lazily according to the loadTasks flag.
        assertEquals(loadTasks,completedJob.tasksLoaded.get());
        assertEquals(1,completedJob.getAMInfos().size());
        assertEquals(10,completedJob.getCompletedMaps());
        assertEquals(1,completedJob.getCompletedReduces());
        // getTasks() forces loading; afterwards tasksLoaded must be true.
        assertEquals(12,completedJob.getTasks().size());
        assertTrue(completedJob.tasksLoaded.get());
        assertEquals(10,completedJob.getTasks(TaskType.MAP).size());
        assertEquals(2,completedJob.getTasks(TaskType.REDUCE).size());
        assertEquals("user",completedJob.getUserName());
        assertEquals(JobState.SUCCEEDED,completedJob.getState());
        // The job report must agree with the job object.
        JobReport jobReport=completedJob.getReport();
        assertEquals("user",jobReport.getUser());
        assertEquals(JobState.SUCCEEDED,jobReport.getJobState());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents

    InternalCallVerifier EqualityVerifier 
    /**
     * Verify that all the events are flushed on stopping the HistoryHandler:
     * after the app stops, a fresh JobHistory must be able to fully parse the
     * job (completed maps, task count, map count and final state).
     * @throws Exception on app or history failure
     */
    @Test public void testEventsFlushOnStop() throws Exception {
        Configuration conf=new Configuration();
        MRApp app=new MRAppWithSpecialHistoryHandler(1,0,true,this.getClass().getName(),true);
        app.submit(conf);
        Job job=app.getContext().getAllJobs().values().iterator().next();
        JobId jobId=job.getID();
        LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
        // Wait for the job to succeed and for all services (incl. history) to stop.
        app.waitForState(job,JobState.SUCCEEDED);
        app.waitForState(Service.STATE.STOPPED);
        // Re-read the flushed history from disk with a brand-new JobHistory.
        HistoryContext context=new JobHistory();
        ((JobHistory)context).init(conf);
        Job parsedJob=context.getJob(jobId);
        Assert.assertEquals("CompletedMaps not correct",1,parsedJob.getCompletedMaps());
        Map tasks=parsedJob.getTasks();
        Assert.assertEquals("No of tasks not correct",1,tasks.size());
        verifyTask(tasks.values().iterator().next());
        Map maps=parsedJob.getTasks(TaskType.MAP);
        Assert.assertEquals("No of maps not correct",1,maps.size());
        // NOTE(review): "currect" is a typo in the failure message.
        Assert.assertEquals("Job state not currect",JobState.SUCCEEDED,parsedJob.getState());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end check that history events written by a finished MRApp are
     * parseable: service lifecycle, user, task/map/reduce counts and state.
     *
     * Fixes: JUnit expected/actual order was swapped in the two service-state
     * assertions (expected value goes first), and the "currect" typo in the
     * final failure message is corrected.
     */
    @Test public void testHistoryEvents() throws Exception {
        Configuration conf=new Configuration();
        MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true);
        app.submit(conf);
        Job job=app.getContext().getAllJobs().values().iterator().next();
        JobId jobId=job.getID();
        LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
        app.waitForState(job,JobState.SUCCEEDED);
        app.waitForState(Service.STATE.STOPPED);
        // Start a fresh JobHistory service and read the job back.
        HistoryContext context=new JobHistory();
        ((JobHistory)context).init(conf);
        ((JobHistory)context).start();
        Assert.assertTrue(context.getStartTime() > 0);
        Assert.assertEquals(Service.STATE.STARTED,((JobHistory)context).getServiceState());
        Job parsedJob=context.getJob(jobId);
        ((JobHistory)context).stop();
        Assert.assertEquals(Service.STATE.STOPPED,((JobHistory)context).getServiceState());
        Assert.assertEquals("CompletedMaps not correct",2,parsedJob.getCompletedMaps());
        Assert.assertEquals(System.getProperty("user.name"),parsedJob.getUserName());
        Map tasks=parsedJob.getTasks();
        Assert.assertEquals("No of tasks not correct",3,tasks.size());
        for ( Task task : tasks.values()) {
            verifyTask(task);
        }
        Map maps=parsedJob.getTasks(TaskType.MAP);
        Assert.assertEquals("No of maps not correct",2,maps.size());
        Map reduces=parsedJob.getTasks(TaskType.REDUCE);
        Assert.assertEquals("No of reduces not correct",1,reduces.size());
        Assert.assertEquals("CompletedReduce not correct",1,parsedJob.getCompletedReduces());
        Assert.assertEquals("Job state not correct",JobState.SUCCEEDED,parsedJob.getState());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that the queue a job was submitted to ("assignedQueue") is
     * preserved through history serialization and parsing.
     *
     * Fix: JUnit expected/actual order was swapped in the two service-state
     * assertions (expected value goes first).
     */
    @Test public void testAssignedQueue() throws Exception {
        Configuration conf=new Configuration();
        MRApp app=new MRAppWithHistory(2,1,true,this.getClass().getName(),true,"assignedQueue");
        app.submit(conf);
        Job job=app.getContext().getAllJobs().values().iterator().next();
        JobId jobId=job.getID();
        LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
        app.waitForState(job,JobState.SUCCEEDED);
        app.waitForState(Service.STATE.STOPPED);
        // Read the job back through a fresh JobHistory service.
        HistoryContext context=new JobHistory();
        ((JobHistory)context).init(conf);
        ((JobHistory)context).start();
        Assert.assertTrue(context.getStartTime() > 0);
        Assert.assertEquals(Service.STATE.STARTED,((JobHistory)context).getServiceState());
        Job parsedJob=context.getJob(jobId);
        ((JobHistory)context).stop();
        Assert.assertEquals(Service.STATE.STOPPED,((JobHistory)context).getServiceState());
        Assert.assertEquals("QueueName not correct","assignedQueue",parsedJob.getQueueName());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Services stop in reverse registration order, so the JobHistoryEventHandler
     * must be the LAST registered service to be the first one stopped.
     */
    @Test public void testJobHistoryEventHandlerIsFirstServiceToStop(){
        Configuration configuration=new Configuration();
        MRApp mrApp=new MRAppWithSpecialHistoryHandler(1,0,true,this.getClass().getName(),true);
        mrApp.init(configuration);
        Service[] registered=mrApp.getServices().toArray(new Service[0]);
        Service lastRegistered=registered[registered.length - 1];
        Assert.assertEquals("JobHistoryEventHandler",lastRegistered.getName());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryParsing

    InternalCallVerifier EqualityVerifier 
    /**
     * A failed job's history file that carries no diagnostics must still parse,
     * yielding the correct job id and an empty error-info string.
     *
     * Fix: JUnit expected/actual order was swapped in the jobId assertion
     * (expected value goes first).
     */
    @Test public void testFailedJobHistoryWithoutDiagnostics() throws Exception {
        final Path histPath=new Path(getClass().getClassLoader().getResource("job_1393307629410_0001-1393307687476-user-Sleep+job-1393307723835-0-0-FAILED-default-1393307693920.jhist").getFile());
        final FileSystem lfs=FileSystem.getLocal(new Configuration());
        final FSDataInputStream fsdis=lfs.open(histPath);
        try {
            JobHistoryParser parser=new JobHistoryParser(fsdis);
            JobInfo info=parser.parse();
            assertEquals("History parsed jobId incorrectly",JobID.forName("job_1393307629410_0001"),info.getJobId());
            assertEquals("Default diagnostics incorrect ","",info.getErrorInfo());
        }
  finally {
            // Always release the stream, even if parsing throws.
            fsdis.close();
        }
    }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Simple test PartialJob: a PartialJob built from a JobIndexInfo exposes
     * full progress, answers null for everything it does not materialize
     * (counters, tasks, completion events, AM infos) and grants access checks.
     */
    @Test(timeout=1000) public void testPartialJob() throws Exception {
        JobId jobId=new JobIdPBImpl();
        jobId.setId(0);
        JobIndexInfo jii=new JobIndexInfo(0L,System.currentTimeMillis(),"user","jobName",jobId,3,2,"JobStatus");
        PartialJob test=new PartialJob(jii,jobId);
        // A partial job always reports complete progress.
        Assert.assertEquals(1.0f,test.getProgress(),0.001f);
        // Everything not carried by the index info is null by design.
        assertNull(test.getAllCounters());
        assertNull(test.getTasks());
        assertNull(test.getTasks(TaskType.MAP));
        assertNull(test.getTask(new TaskIdPBImpl()));
        assertNull(test.getTaskAttemptCompletionEvents(0,100));
        assertNull(test.getMapAttemptCompletionEvents(0,100));
        // Access checks on a partial job always pass.
        assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(),null));
        assertNull(test.getAMInfos());
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Simple test some methods of JobHistory: job listing (all, per-app,
     * filtered partial jobs) plus the trivial accessors (application id/name,
     * attempt id) and the accessors that deliberately return null.
     */
    @Test(timeout=20000) public void testJobHistoryMethods() throws Exception {
        LOG.info("STARTING testJobHistoryMethods");
        try {
            Configuration configuration=new Configuration();
            configuration.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
            RackResolver.init(configuration);
            // Run a 1-map/1-reduce app to completion so a history file exists.
            MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
            app.submit(configuration);
            Job job=app.getContext().getAllJobs().values().iterator().next();
            JobId jobId=job.getID();
            LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
            app.waitForState(job,JobState.SUCCEEDED);
            JobHistory jobHistory=new JobHistory();
            jobHistory.init(configuration);
            Assert.assertEquals(1,jobHistory.getAllJobs().size());
            Assert.assertEquals(1,jobHistory.getAllJobs(app.getAppID()).size());
            // Partial-job query filtered by queue, time window and final state.
            JobsInfo jobsinfo=jobHistory.getPartialJobs(0L,10L,null,"default",0L,System.currentTimeMillis() + 1,0L,System.currentTimeMillis() + 1,JobState.SUCCEEDED);
            Assert.assertEquals(1,jobsinfo.getJobs().size());
            Assert.assertNotNull(jobHistory.getApplicationAttemptId());
            Assert.assertEquals("application_0_0000",jobHistory.getApplicationID().toString());
            Assert.assertEquals("Job History Server",jobHistory.getApplicationName());
            // These accessors intentionally return null on JobHistory.
            Assert.assertNull(jobHistory.getEventHandler());
            Assert.assertNull(jobHistory.getClock());
            Assert.assertNull(jobHistory.getClusterInfo());
        }
  finally {
            LOG.info("FINISHED testJobHistoryMethods");
        }
    }

    EqualityVerifier 
    /** A default-constructed JobInfo reports NORMAL priority and printAll() does not throw. */
    @Test(timeout=50000) public void testJobInfo() throws Exception {
        JobInfo jobInfo=new JobInfo();
        Assert.assertEquals("NORMAL",jobInfo.getPriority());
        // Smoke-check: dumping the empty info must not fail.
        jobInfo.printAll();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs an app whose attempts partially fail, then parses the history file
     * and verifies rack names and the number of FAILED attempts.
     *
     * Fix: JUnit expected/actual order was swapped in the rack-name assertion
     * (expected constant RACK_NAME goes first).
     */
    @Test(timeout=30000) public void testHistoryParsingForFailedAttempts() throws Exception {
        LOG.info("STARTING testHistoryParsingForFailedAttempts");
        try {
            Configuration conf=new Configuration();
            conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
            RackResolver.init(conf);
            MRApp app=new MRAppWithHistoryWithFailedAttempt(2,1,true,this.getClass().getName(),true);
            app.submit(conf);
            Job job=app.getContext().getAllJobs().values().iterator().next();
            JobId jobId=job.getID();
            app.waitForState(job,JobState.SUCCEEDED);
            app.waitForState(Service.STATE.STOPPED);
            JobHistory jobHistory=new JobHistory();
            jobHistory.init(conf);
            HistoryFileInfo fileInfo=jobHistory.getJobFileInfo(jobId);
            JobHistoryParser parser;
            JobInfo jobInfo;
            // Synchronize on fileInfo so the file is not moved while we read it.
            synchronized (fileInfo) {
                Path historyFilePath=fileInfo.getHistoryFile();
                FSDataInputStream in=null;
                FileContext fc=null;
                try {
                    fc=FileContext.getFileContext(conf);
                    in=fc.open(fc.makeQualified(historyFilePath));
                }
 catch (              IOException ioe) {
                    LOG.info("Can not open history file: " + historyFilePath,ioe);
                    throw (new Exception("Can not open History File"));
                }
                parser=new JobHistoryParser(in);
                jobInfo=parser.parse();
            }
            Exception parseException=parser.getParseException();
            Assert.assertNull("Caught an expected exception " + parseException,parseException);
            // Count attempts recorded as FAILED across all tasks.
            int noOffailedAttempts=0;
            Map allTasks=jobInfo.getAllTasks();
            for ( Task task : job.getTasks().values()) {
                TaskInfo taskInfo=allTasks.get(TypeConverter.fromYarn(task.getID()));
                for ( TaskAttempt taskAttempt : task.getAttempts().values()) {
                    TaskAttemptInfo taskAttemptInfo=taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
                    Assert.assertEquals("rack-name is incorrect",RACK_NAME,taskAttemptInfo.getRackname());
                    if (taskAttemptInfo.getTaskStatus().equals("FAILED")) {
                        noOffailedAttempts++;
                    }
                }
            }
            Assert.assertEquals("No of Failed tasks doesn't match.",2,noOffailedAttempts);
        }
  finally {
            LOG.info("FINISHED testHistoryParsingForFailedAttempts");
        }
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryServer

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end check of the history server's MRClientProtocol report RPCs:
     * task-attempt report, task report, completion events and diagnostics for
     * a job replayed from history.
     */
    @Test(timeout=50000) public void testReports() throws Exception {
        Configuration config=new Configuration();
        config.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class);
        RackResolver.init(config);
        // Run a 1-map/1-reduce job to completion so history exists.
        MRApp app=new MRAppWithHistory(1,1,true,this.getClass().getName(),true);
        app.submit(config);
        Job job=app.getContext().getAllJobs().values().iterator().next();
        app.waitForState(job,JobState.SUCCEEDED);
        historyServer=new JobHistoryServer();
        historyServer.init(config);
        historyServer.start();
        // Locate the JobHistory sub-service inside the history server.
        JobHistory jobHistory=null;
        for ( Service service : historyServer.getServices()) {
            if (service instanceof JobHistory) {
                jobHistory=(JobHistory)service;
            }
        }
 ;
        Map jobs=jobHistory.getAllJobs();
        assertEquals(1,jobs.size());
        assertEquals("job_0_0000",jobs.keySet().iterator().next().toString());
        Task task=job.getTasks().values().iterator().next();
        TaskAttempt attempt=task.getAttempts().values().iterator().next();
        HistoryClientService historyService=historyServer.getClientService();
        MRClientProtocol protocol=historyService.getClientHandler();
        // getTaskAttemptReport: container id, empty diagnostics, counters present.
        GetTaskAttemptReportRequest gtarRequest=recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
        TaskAttemptId taId=attempt.getID();
        taId.setTaskId(task.getID());
        taId.getTaskId().setJobId(job.getID());
        gtarRequest.setTaskAttemptId(taId);
        GetTaskAttemptReportResponse response=protocol.getTaskAttemptReport(gtarRequest);
        assertEquals("container_0_0000_01_000000",response.getTaskAttemptReport().getContainerId().toString());
        assertTrue(response.getTaskAttemptReport().getDiagnosticInfo().isEmpty());
        assertNotNull(response.getTaskAttemptReport().getCounters().getCounter(TaskCounter.PHYSICAL_MEMORY_BYTES));
        assertEquals(taId.toString(),response.getTaskAttemptReport().getTaskAttemptId().toString());
        // getTaskReport: empty diagnostics, full progress, matching id, SUCCEEDED.
        GetTaskReportRequest request=recordFactory.newRecordInstance(GetTaskReportRequest.class);
        TaskId taskId=task.getID();
        taskId.setJobId(job.getID());
        request.setTaskId(taskId);
        GetTaskReportResponse reportResponse=protocol.getTaskReport(request);
        assertEquals("",reportResponse.getTaskReport().getDiagnosticsList().iterator().next());
        assertEquals(1.0f,reportResponse.getTaskReport().getProgress(),0.01);
        assertEquals(taskId.toString(),reportResponse.getTaskReport().getTaskId().toString());
        assertEquals(TaskState.SUCCEEDED,reportResponse.getTaskReport().getTaskState());
        // getTaskAttemptCompletionEvents: none are reported for the replayed job.
        GetTaskAttemptCompletionEventsRequest taskAttemptRequest=recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
        taskAttemptRequest.setJobId(job.getID());
        GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse=protocol.getTaskAttemptCompletionEvents(taskAttemptRequest);
        assertEquals(0,taskAttemptCompletionEventsResponse.getCompletionEventCount());
        // getDiagnostics: a single empty diagnostics entry for the attempt.
        GetDiagnosticsRequest diagnosticRequest=recordFactory.newRecordInstance(GetDiagnosticsRequest.class);
        diagnosticRequest.setTaskAttemptId(taId);
        GetDiagnosticsResponse diagnosticResponse=protocol.getDiagnostics(diagnosticRequest);
        assertEquals(1,diagnosticResponse.getDiagnosticsCount());
        assertEquals("",diagnosticResponse.getDiagnostics(0));
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Launching the JobHistoryServer via its main entry point must not call
     * System.exit; ExitUtil converts any exit into an ExitException we can
     * observe.
     */
    @Test(timeout=60000) public void testLaunch() throws Exception {
        ExitUtil.disableSystemExit();
        try {
            historyServer=JobHistoryServer.launchJobHistoryServer(new String[0]);
        }
 catch (    ExitUtil.ExitException e) {
            // NOTE(review): asserting status 0 and then unconditionally failing looks
            // contradictory — presumably any exit (even clean) is a test failure here;
            // confirm the intent, and whether the status check should precede fail().
            assertEquals(0,e.status);
            ExitUtil.resetFirstExitException();
            fail();
        }
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Walks the JobHistoryServer through init/start/stop and checks that the
     * embedded HistoryClientService follows the same lifecycle.
     */
    @Test(timeout=50000) public void testStartStopServer() throws Exception {
        historyServer=new JobHistoryServer();
        Configuration configuration=new Configuration();
        historyServer.init(configuration);
        // After init: INITED, with the expected number of child services.
        assertEquals(STATE.INITED,historyServer.getServiceState());
        assertEquals(6,historyServer.getServices().size());
        HistoryClientService clientService=historyServer.getClientService();
        assertNotNull(historyServer.getClientService());
        assertEquals(STATE.INITED,clientService.getServiceState());
        // start() cascades to child services.
        historyServer.start();
        assertEquals(STATE.STARTED,historyServer.getServiceState());
        assertEquals(STATE.STARTED,clientService.getServiceState());
        // stop() brings the server down; the client handler keeps its bind address.
        historyServer.stop();
        assertEquals(STATE.STOPPED,historyServer.getServiceState());
        assertNotNull(clientService.getClientHandler().getConnectAddress());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobIdHistoryFileInfoMap

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Trivial test case that verifies basic functionality of
     * {@link JobIdHistoryFileInfoMap}: putIfAbsent semantics, get, size,
     * navigableKeySet and values with a single element.
     */
    @Test(timeout=2000) public void testWithSingleElement() throws InterruptedException {
        JobIdHistoryFileInfoMap mapWithSize=new JobIdHistoryFileInfoMap();
        JobId jobId=MRBuilderUtils.newJobId(1,1,1);
        HistoryFileInfo fileInfo1=Mockito.mock(HistoryFileInfo.class);
        Mockito.when(fileInfo1.getJobId()).thenReturn(jobId);
        // First insert returns null; a second insert returns the existing mapping.
        assertEquals("Incorrect return on putIfAbsent()",null,mapWithSize.putIfAbsent(jobId,fileInfo1));
        assertEquals("Incorrect return on putIfAbsent()",fileInfo1,mapWithSize.putIfAbsent(jobId,fileInfo1));
        assertEquals("Incorrect get()",fileInfo1,mapWithSize.get(jobId));
        assertTrue("Incorrect size()",checkSize(mapWithSize,1));
        // Key and value views must each contain exactly the single element.
        NavigableSet set=mapWithSize.navigableKeySet();
        assertEquals("Incorrect navigableKeySet()",1,set.size());
        assertTrue("Incorrect navigableKeySet()",set.contains(jobId));
        Collection values=mapWithSize.values();
        assertEquals("Incorrect values()",1,values.size());
        assertTrue("Incorrect values()",values.contains(fileInfo1));
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.TestJobListCache

    InternalCallVerifier EqualityVerifier 
    /** Adding the same HistoryFileInfo twice must leave exactly one cache entry. */
    @Test(timeout=1000) public void testAddExisting(){
        JobListCache jobListCache=new JobListCache(2,1000);
        JobId cachedJobId=MRBuilderUtils.newJobId(1,1,1);
        HistoryFileInfo mockInfo=Mockito.mock(HistoryFileInfo.class);
        Mockito.when(mockInfo.getJobId()).thenReturn(cachedJobId);
        // Second addIfAbsent with an identical entry is a no-op.
        jobListCache.addIfAbsent(mockInfo);
        jobListCache.addIfAbsent(mockInfo);
        assertEquals("Incorrect number of cache entries",1,jobListCache.values().size());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.server.TestHSAdminServer

    InternalCallVerifier EqualityVerifier 
    /** "-getGroups &lt;current user&gt;" via the HSAdmin client must exit with 0. */
    @Test public void testGetGroups() throws Exception {
        String currentUser=UserGroupInformation.getCurrentUser().getUserName();
        String[] cmdArgs={"-getGroups",currentUser};
        int exitCode=hsAdminClient.run(cmdArgs);
        assertEquals("Exit code should be 0 but was: " + exitCode,0,exitCode);
    }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Two lookups before "-refreshUserToGroupsMappings" must agree; a lookup
     * after the refresh must differ (the test resolver changes its answers).
     *
     * Fix: the original reused one String[] sized for the FIRST group list for
     * every toArray() call; when a later list is larger, toArray allocates a
     * new array and the reused one silently keeps stale contents, so the debug
     * printout lies. Each list now gets its own correctly-sized array.
     */
    @Test public void testRefreshUserToGroupsMappings() throws Exception {
        String[] args=new String[]{"-refreshUserToGroupsMappings"};
        Groups groups=Groups.getUserToGroupsMappingService(conf);
        String user=UserGroupInformation.getCurrentUser().getUserName();
        System.out.println("first attempt:");
        List g1=groups.getGroups(user);
        System.out.println(Arrays.toString(g1.toArray(new String[0])));
        System.out.println("second attempt, should be same:");
        List g2=groups.getGroups(user);
        System.out.println(Arrays.toString(g2.toArray(new String[0])));
        for (int i=0; i < g2.size(); i++) {
            assertEquals("Should be same group ",g1.get(i),g2.get(i));
        }
        // Refresh the mapping service through the admin client.
        hsAdminClient.run(args);
        System.out.println("third attempt(after refresh command), should be different:");
        List g3=groups.getGroups(user);
        System.out.println(Arrays.toString(g3.toArray(new String[0])));
        for (int i=0; i < g3.size(); i++) {
            assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i)));
        }
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestBlocks

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * test HsController: every page accessor returns the expected page class,
     * and every action method routes to (sets) the expected page class.
     */
    @Test public void testHsController() throws Exception {
        AppContext ctx=mock(AppContext.class);
        ApplicationId appId=ApplicationIdPBImpl.newInstance(0,5);
        when(ctx.getApplicationID()).thenReturn(appId);
        AppForTest app=new AppForTest(ctx);
        Configuration config=new Configuration();
        RequestContext requestCtx=mock(RequestContext.class);
        HsControllerForTest controller=new HsControllerForTest(app,config,requestCtx);
        // index() sets the page title.
        controller.index();
        assertEquals("JobHistory",controller.get(Params.TITLE,""));
        // Static page-class accessors.
        assertEquals(HsJobPage.class,controller.jobPage());
        assertEquals(HsCountersPage.class,controller.countersPage());
        assertEquals(HsTasksPage.class,controller.tasksPage());
        assertEquals(HsTaskPage.class,controller.taskPage());
        assertEquals(HsAttemptsPage.class,controller.attemptsPage());
        // Parameters the action methods will read.
        controller.set(AMParams.JOB_ID,"job_01_01");
        controller.set(AMParams.TASK_ID,"task_01_01_m01_01");
        controller.set(AMParams.TASK_TYPE,"m");
        controller.set(AMParams.ATTEMPT_STATE,"State");
        // Stub the job lookup and grant access so actions can render.
        Job job=mock(Job.class);
        Task task=mock(Task.class);
        when(job.getTask(any(TaskId.class))).thenReturn(task);
        JobId jobID=MRApps.toJobID("job_01_01");
        when(ctx.getJob(jobID)).thenReturn(job);
        when(job.checkAccess(any(UserGroupInformation.class),any(JobACL.class))).thenReturn(true);
        // Each action must route to its page class.
        controller.job();
        assertEquals(HsJobPage.class,controller.getClazz());
        controller.jobCounters();
        assertEquals(HsCountersPage.class,controller.getClazz());
        controller.taskCounters();
        assertEquals(HsCountersPage.class,controller.getClazz());
        controller.tasks();
        assertEquals(HsTasksPage.class,controller.getClazz());
        controller.task();
        assertEquals(HsTaskPage.class,controller.getClazz());
        controller.attempts();
        assertEquals(HsAttemptsPage.class,controller.getClazz());
        assertEquals(HsConfPage.class,controller.confPage());
        assertEquals(HsAboutPage.class,controller.aboutPage());
        controller.about();
        assertEquals(HsAboutPage.class,controller.getClazz());
        controller.logs();
        assertEquals(HsLogsPage.class,controller.getClazz());
        controller.nmlogs();
        assertEquals(AggregatedLogsPage.class,controller.getClazz());
        assertEquals(HsSingleCounterPage.class,controller.singleCounterPage());
        controller.singleJobCounter();
        assertEquals(HsSingleCounterPage.class,controller.getClazz());
        controller.singleTaskCounter();
        assertEquals(HsSingleCounterPage.class,controller.getClazz());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHSWebApp

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** HsController.index() must publish the mock context's application id under APP_ID. */
    @Test public void testAppControllerIndex(){
        MockAppContext mockContext=new MockAppContext(0,1,1,1);
        Injector mockInjector=WebAppTests.createMockInjector(AppContext.class,mockContext);
        HsController hsController=mockInjector.getInstance(HsController.class);
        hsController.index();
        String expectedAppId=mockContext.getApplicationID().toString();
        assertEquals(expectedAppId,hsController.get(APP_ID,""));
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServices

    InternalCallVerifier EqualityVerifier 
    /** GET history/info/ with no Accept header defaults to JSON with one historyInfo element. */
    @Test public void testInfoDefault() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history")
            .path("info/").get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
        JSONObject jsonBody=clientResponse.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,jsonBody.length());
        verifyHSInfo(jsonBody.getJSONObject("historyInfo"),appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history/ with no Accept header defaults to JSON with one historyInfo element. */
    @Test public void testHSDefault() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history/")
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
        JSONObject jsonBody=clientResponse.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,jsonBody.length());
        verifyHSInfo(jsonBody.getJSONObject("historyInfo"),appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history/info/ with Accept: application/xml returns a verifiable XML body. */
    @Test public void testInfoXML() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history")
            .path("info/").accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE,clientResponse.getType());
        String xmlBody=clientResponse.getEntity(String.class);
        verifyHSInfoXML(xmlBody,appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history/info with Accept: application/json returns one historyInfo element. */
    @Test public void testInfo() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history")
            .path("info").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
        JSONObject jsonBody=clientResponse.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,jsonBody.length());
        verifyHSInfo(jsonBody.getJSONObject("historyInfo"),appContext);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** An unsupported Accept type (text/plain) must produce a 500 with an empty body. */
    @Test public void testInvalidAccept() throws JSONException, Exception {
        WebResource webResource=resource();
        String responseStr="";
        try {
            responseStr=webResource.path("ws").path("v1").path("history")
                .accept(MediaType.TEXT_PLAIN).get(String.class);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException uie) {
            ClientResponse errorResponse=uie.getResponse();
            assertEquals(Status.INTERNAL_SERVER_ERROR,errorResponse.getClientResponseStatus());
            WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr);
        }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** A request to an unknown top-level path must produce a 404 with an empty body. */
    @Test public void testInvalidUri2() throws JSONException, Exception {
        WebResource webResource=resource();
        String responseStr="";
        try {
            responseStr=webResource.path("ws").path("v1").path("invalid")
                .accept(MediaType.APPLICATION_JSON).get(String.class);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException uie) {
            ClientResponse errorResponse=uie.getResponse();
            assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
            WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr);
        }
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history with Accept: application/xml returns a verifiable XML body. */
    @Test public void testHSXML() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE,clientResponse.getType());
        String xmlBody=clientResponse.getEntity(String.class);
        verifyHSInfoXML(xmlBody,appContext);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** A request to an unknown sub-path of history must produce a 404 with an empty body. */
    @Test public void testInvalidUri() throws JSONException, Exception {
        WebResource webResource=resource();
        String responseStr="";
        try {
            responseStr=webResource.path("ws").path("v1").path("history").path("bogus")
                .accept(MediaType.APPLICATION_JSON).get(String.class);
            fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException uie) {
            ClientResponse errorResponse=uie.getResponse();
            assertEquals(Status.NOT_FOUND,errorResponse.getClientResponseStatus());
            WebServicesTestUtils.checkStringMatch("error string exists and shouldn't","",responseStr);
        }
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history/info/ (trailing slash) as JSON returns one historyInfo element. */
    @Test public void testInfoSlash() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history")
            .path("info/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
        JSONObject jsonBody=clientResponse.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,jsonBody.length());
        verifyHSInfo(jsonBody.getJSONObject("historyInfo"),appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history/ (trailing slash) as JSON returns one historyInfo element. */
    @Test public void testHSSlash() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
        JSONObject jsonBody=clientResponse.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,jsonBody.length());
        verifyHSInfo(jsonBody.getJSONObject("historyInfo"),appContext);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET history (no trailing slash) as JSON returns one historyInfo element. */
    @Test public void testHS() throws JSONException, Exception {
        WebResource webResource=resource();
        ClientResponse clientResponse=webResource.path("ws").path("v1").path("history")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE,clientResponse.getType());
        JSONObject jsonBody=clientResponse.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements",1,jsonBody.length());
        verifyHSInfo(jsonBody.getJSONObject("historyInfo"),appContext);
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAcls

    UtilityVerifier EqualityVerifier HybridVerifier 
    /** Enemy user gets 401 from getJobTaskAttemptId; friendly user succeeds. */
    @Test public void testGetJobTaskAttemptIdAcls(){
        HttpServletRequest request=mock(HttpServletRequest.class);
        when(request.getRemoteUser()).thenReturn(ENEMY_USER);
        try {
            hsWebServices.getJobTaskAttemptId(request,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr);
            fail("enemy can access job");
        } catch (WebApplicationException wae) {
            assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(wae.getResponse().getStatus()));
        }
        when(request.getRemoteUser()).thenReturn(FRIENDLY_USER);
        hsWebServices.getJobTaskAttemptId(request,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr);
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /** Enemy user gets 401 from getJob; friendly user succeeds. */
    @Test public void testGetJobAcls(){
        HttpServletRequest request=mock(HttpServletRequest.class);
        when(request.getRemoteUser()).thenReturn(ENEMY_USER);
        try {
            hsWebServices.getJob(request,jobIdStr);
            fail("enemy can access job");
        } catch (WebApplicationException wae) {
            assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(wae.getResponse().getStatus()));
        }
        when(request.getRemoteUser()).thenReturn(FRIENDLY_USER);
        hsWebServices.getJob(request,jobIdStr);
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /** Enemy user gets 401 from getJobTaskAttempts; friendly user succeeds. */
    @Test public void testGetJobTaskAttemptsAcls(){
        HttpServletRequest request=mock(HttpServletRequest.class);
        when(request.getRemoteUser()).thenReturn(ENEMY_USER);
        try {
            hsWebServices.getJobTaskAttempts(request,this.jobIdStr,this.taskIdStr);
            fail("enemy can access job");
        } catch (WebApplicationException wae) {
            assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(wae.getResponse().getStatus()));
        }
        when(request.getRemoteUser()).thenReturn(FRIENDLY_USER);
        hsWebServices.getJobTaskAttempts(request,this.jobIdStr,this.taskIdStr);
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /** Enemy user gets 401 from getJobTasks; friendly user succeeds. */
    @Test public void testGetJobTasksAcls(){
        HttpServletRequest request=mock(HttpServletRequest.class);
        when(request.getRemoteUser()).thenReturn(ENEMY_USER);
        try {
            hsWebServices.getJobTasks(request,jobIdStr,"m");
            fail("enemy can access job");
        } catch (WebApplicationException wae) {
            assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(wae.getResponse().getStatus()));
        }
        when(request.getRemoteUser()).thenReturn(FRIENDLY_USER);
        hsWebServices.getJobTasks(request,jobIdStr,"m");
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /** Enemy user gets 401 from getSingleTaskCounters; friendly user succeeds. */
    @Test public void testGetSingleTaskCountersAcls(){
        HttpServletRequest request=mock(HttpServletRequest.class);
        when(request.getRemoteUser()).thenReturn(ENEMY_USER);
        try {
            hsWebServices.getSingleTaskCounters(request,this.jobIdStr,this.taskIdStr);
            fail("enemy can access job");
        } catch (WebApplicationException wae) {
            assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(wae.getResponse().getStatus()));
        }
        when(request.getRemoteUser()).thenReturn(FRIENDLY_USER);
        hsWebServices.getSingleTaskCounters(request,this.jobIdStr,this.taskIdStr);
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /** Enemy user gets 401 from getJobTaskAttemptIdCounters; friendly user succeeds. */
    @Test public void testGetJobTaskAttemptIdCountersAcls(){
        HttpServletRequest request=mock(HttpServletRequest.class);
        when(request.getRemoteUser()).thenReturn(ENEMY_USER);
        try {
            hsWebServices.getJobTaskAttemptIdCounters(request,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr);
            fail("enemy can access job");
        } catch (WebApplicationException wae) {
            assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(wae.getResponse().getStatus()));
        }
        when(request.getRemoteUser()).thenReturn(FRIENDLY_USER);
        hsWebServices.getJobTaskAttemptIdCounters(request,this.jobIdStr,this.taskIdStr,this.taskAttemptIdStr);
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetJobTaskAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobTask(hsr,jobIdStr,this.taskIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobTask(hsr,this.jobIdStr,this.taskIdStr); }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetJobCountersAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobCounters(hsr,jobIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobCounters(hsr,jobIdStr); }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetJobConfAcls(){ HttpServletRequest hsr=mock(HttpServletRequest.class); when(hsr.getRemoteUser()).thenReturn(ENEMY_USER); try { hsWebServices.getJobConf(hsr,jobIdStr); fail("enemy can access job"); } catch ( WebApplicationException e) { assertEquals(Status.UNAUTHORIZED,Status.fromStatusCode(e.getResponse().getStatus())); } when(hsr.getRemoteUser()).thenReturn(FRIENDLY_USER); hsWebServices.getJobConf(hsr,jobIdStr); }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesAttempts

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptsSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); verifyHsTaskAttempts(json,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptIdXMLCounters() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).path("counters").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("jobTaskAttemptCounters"); verifyHsTaskCountersXML(nodes,att); } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptIdDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("taskAttempt"); verifyHsTaskAttempt(info,att,task.getType()); } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptsDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); verifyHsTaskAttempts(json,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptsXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList attempts=dom.getElementsByTagName("taskAttempts"); assertEquals("incorrect number of elements",1,attempts.getLength()); NodeList nodes=dom.getElementsByTagName("taskAttempt"); verifyHsTaskAttemptsXML(nodes,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptIdCounters() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobTaskAttemptCounters"); verifyHsJobTaskAttemptCounters(info,att); } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttempts() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); verifyHsTaskAttempts(json,task); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptId() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("taskAttempt"); verifyHsTaskAttempt(info,att,task.getType()); } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptIdXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid).accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("taskAttempt"); for (int i=0; i < nodes.getLength(); i++) { Element element=(Element)nodes.item(i); verifyHsTaskAttemptXML(element,att,task.getType()); } } } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testTaskAttemptIdSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); for ( Task task : jobsMap.get(id).getTasks().values()) { String tid=MRApps.toString(task.getID()); for ( TaskAttempt att : task.getAttempts().values()) { TaskAttemptId attemptid=att.getID(); String attid=MRApps.toString(attemptid); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid).path("attempts").path(attid + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("taskAttempt"); verifyHsTaskAttempt(info,att,task.getType()); } } } }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobConf

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobConfSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("conf/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("conf"); verifyHsJobConf(info,jobsMap.get(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobConf() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("conf").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("conf"); verifyHsJobConf(info,jobsMap.get(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobConfDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("conf").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("conf"); verifyHsJobConf(info,jobsMap.get(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobConfXML() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("conf").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList info=dom.getElementsByTagName("conf"); verifyHsJobConfXML(info,jobsMap.get(id)); } }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobs

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobAttemptsDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("jobattempts").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobAttempts"); verifyHsJobAttempts(info,appContext.getJob(id)); } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testJobIdInvalidDefault() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_foo").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); verifyJobIdInvalid(message,type,classname); } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testJobIdInvalidXML() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_foo").accept(MediaType.APPLICATION_XML).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String msg=response.getEntity(String.class); System.out.println(msg); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(msg)); Document dom=db.parse(is); NodeList nodes=dom.getElementsByTagName("RemoteException"); Element element=(Element)nodes.item(0); String message=WebServicesTestUtils.getXmlString(element,"message"); String type=WebServicesTestUtils.getXmlString(element,"exception"); String classname=WebServicesTestUtils.getXmlString(element,"javaClassName"); verifyJobIdInvalid(message,type,classname); } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testJobIdNonExist() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_0_1234").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: job, job_0_1234, is not found",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testJobCountersForKilledJob() throws Exception { WebResource r=resource(); appContext=new MockHistoryContext(0,1,1,1,true); injector=Guice.createInjector(new ServletModule(){ @Override protected void configureServlets(){ webApp=mock(HsWebApp.class); when(webApp.name()).thenReturn("hsmockwebapp"); bind(JAXBContextResolver.class); bind(HsWebServices.class); bind(GenericExceptionHandler.class); bind(WebApp.class).toInstance(webApp); bind(AppContext.class).toInstance(appContext); bind(HistoryContext.class).toInstance(appContext); bind(Configuration.class).toInstance(conf); serve("/*").with(GuiceContainer.class); } } ); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobCounters"); WebServicesTestUtils.checkStringMatch("id",MRApps.toString(id),info.getString("id")); assertTrue("Job shouldn't contain any counters",info.length() == 1); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobAttempts() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("jobattempts").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobAttempts"); verifyHsJobAttempts(info,appContext.getJob(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobCountersDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters/").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobCounters"); verifyHsJobCounters(info,appContext.getJob(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobIdSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId + "/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("job"); VerifyJobsUtils.verifyHsJob(info,appContext.getJob(id)); } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testJobIdInvalidBogus() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("bogusfoo").get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: JobId string : " + "bogusfoo is not properly formed",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobAttemptsSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("jobattempts/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobAttempts"); verifyHsJobAttempts(info,appContext.getJob(id)); } }

    InternalCallVerifier EqualityVerifier 
    @Test public void testJobsDefault() throws JSONException, Exception { WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject jobs=json.getJSONObject("jobs"); JSONArray arr=jobs.getJSONArray("job"); assertEquals("incorrect number of elements",1,arr.length()); JSONObject info=arr.getJSONObject(0); Job job=appContext.getPartialJob(MRApps.toJobID(info.getString("id"))); VerifyJobsUtils.verifyHsJobPartial(info,job); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobId() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("job"); VerifyJobsUtils.verifyHsJob(info,appContext.getJob(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobIdXML() throws Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList job=dom.getElementsByTagName("job"); verifyHsJobXML(job,appContext); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobCounters() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobCounters"); verifyHsJobCounters(info,appContext.getJob(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobCountersSlash() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("counters/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("jobCounters"); verifyHsJobCounters(info,appContext.getJob(id)); } }

    InternalCallVerifier EqualityVerifier 
    @Test public void testJobs() throws JSONException, Exception { WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject jobs=json.getJSONObject("jobs"); JSONArray arr=jobs.getJSONArray("job"); assertEquals("incorrect number of elements",1,arr.length()); JSONObject info=arr.getJSONObject(0); Job job=appContext.getPartialJob(MRApps.toJobID(info.getString("id"))); VerifyJobsUtils.verifyHsJobPartial(info,job); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobAttemptsXML() throws Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).path("jobattempts").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList attempts=dom.getElementsByTagName("jobAttempts"); assertEquals("incorrect number of elements",1,attempts.getLength()); NodeList info=dom.getElementsByTagName("jobAttempt"); verifyHsJobAttemptsXML(info,appContext.getJob(id)); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobsXML() throws Exception { WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilderFactory dbf=DocumentBuilderFactory.newInstance(); DocumentBuilder db=dbf.newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is); NodeList jobs=dom.getElementsByTagName("jobs"); assertEquals("incorrect number of elements",1,jobs.getLength()); NodeList job=dom.getElementsByTagName("job"); assertEquals("incorrect number of elements",1,job.getLength()); verifyHsJobPartialXML(job,appContext); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testJobIdDefault() throws JSONException, Exception { WebResource r=resource(); Map jobsMap=appContext.getAllJobs(); for ( JobId id : jobsMap.keySet()) { String jobId=MRApps.toString(id); ClientResponse response=r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path(jobId).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject info=json.getJSONObject("job"); VerifyJobsUtils.verifyHsJob(info,appContext.getJob(id)); } }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testJobIdInvalid() throws JSONException, Exception { WebResource r=resource(); try { r.path("ws").path("v1").path("history").path("mapreduce").path("jobs").path("job_foo").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid uri"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); verifyJobIdInvalid(message,type,classname); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the XML job-counters payload for every known job.
     */
    @Test
    public void testJobCountersXML() throws Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("counters")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList info = dom.getElementsByTagName("jobCounters");
        verifyHsJobCountersXML(info, appContext.getJob(id));
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Hits the jobs endpoint with a trailing slash and checks the partial-job JSON. */
    @Test
    public void testJobsSlash() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray arr = body.getJSONObject("jobs").getJSONArray("job");
      assertEquals("incorrect number of elements", 1, arr.length());
      JSONObject info = arr.getJSONObject(0);
      Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
      VerifyJobsUtils.verifyHsJobPartial(info, job);
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesJobsQuery

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** finishedTimeBegin set to "now" should still match all three finished jobs. */
    @Test
    public void testJobsQueryFinishTimeBegin() throws JSONException, Exception {
      WebResource web = resource();
      Long now = System.currentTimeMillis();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeBegin", String.valueOf(now))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray arr = body.getJSONObject("jobs").getJSONArray("job");
      assertEquals("incorrect number of elements", 3, arr.length());
    }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by an unknown user must yield a null "jobs" entry. */
    @Test
    public void testJobsQueryUserNone() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("user", "bogus")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A negative finishedTimeEnd must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryFinishTimeEndNegative() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeEnd", String.valueOf(-1000))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: finishedTimeEnd must be greater than 0",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by a nonexistent queue must yield a null "jobs" entry. */
    @Test
    public void testJobsQueryQueueNonExist() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("queue", "bogus")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Filters jobs by a [finishedTimeBegin, finishedTimeEnd] window chosen to
     * exclude exactly the last-finishing job; expects size-1 matches.
     */
    @Test
    public void testJobsQueryFinishTimeBeginEnd() throws JSONException, Exception {
      WebResource r = resource();
      // Generic types restored: raw Map/ArrayList broke getReport() and unboxing.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      int size = jobsMap.size();
      ArrayList<Long> finishTime = new ArrayList<Long>(size);
      for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
        finishTime.add(entry.getValue().getReport().getFinishTime());
      }
      Collections.sort(finishTime);
      assertTrue("Error we must have atleast 3 jobs", size >= 3);
      // Second-largest finish time: the window cuts off only the latest job.
      long midFinishTime = finishTime.get(size - 2);
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs")
          .queryParam("finishedTimeBegin", String.valueOf(40000))
          .queryParam("finishedTimeEnd", String.valueOf(midFinishTime))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject jobs = json.getJSONObject("jobs");
      JSONArray arr = jobs.getJSONArray("job");
      assertEquals("incorrect number of elements", size - 1, arr.length());
    }

    InternalCallVerifier EqualityVerifier 
    /** A non-numeric finishedTimeEnd must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryFinishTimeEndInvalidformat() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeEnd", "efsd")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A non-numeric finishedTimeBegin must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryFinishTimeInvalidformat() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeBegin", "efsd")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A non-numeric startedTimeBegin must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryStartTimeInvalidformat() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeBegin", "efsd")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A non-numeric startedTimeEnd must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryStartTimeEndInvalidformat() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeEnd", "efsd")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: Invalid number format: For input string: \"efsd\"",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** limit=2 should cap the returned job list at two entries. */
    @Test
    public void testJobsQueryLimit() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("limit", "2")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray arr = body.getJSONObject("jobs").getJSONArray("job");
      assertEquals("incorrect number of elements", 2, arr.length());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** startedTimeEnd set to "now" should match all three jobs. */
    @Test
    public void testJobsQueryStartTimeEnd() throws JSONException, Exception {
      WebResource web = resource();
      Long now = System.currentTimeMillis();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeEnd", String.valueOf(now))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray arr = body.getJSONObject("jobs").getJSONArray("job");
      assertEquals("incorrect number of elements", 3, arr.length());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Filters jobs by a [startedTimeBegin, startedTimeEnd] window chosen to
     * exclude exactly the last-starting job; expects size-1 matches.
     */
    @Test
    public void testJobsQueryStartTimeBeginEnd() throws JSONException, Exception {
      WebResource r = resource();
      // Generic types restored: raw Map/ArrayList broke getReport() and unboxing.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      int size = jobsMap.size();
      ArrayList<Long> startTime = new ArrayList<Long>(size);
      for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
        startTime.add(entry.getValue().getReport().getStartTime());
      }
      Collections.sort(startTime);
      assertTrue("Error we must have atleast 3 jobs", size >= 3);
      // Second-largest start time: the window cuts off only the latest job.
      long midStartTime = startTime.get(size - 2);
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs")
          .queryParam("startedTimeBegin", String.valueOf(40000))
          .queryParam("startedTimeEnd", String.valueOf(midStartTime))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject jobs = json.getJSONObject("jobs");
      JSONArray arr = jobs.getJSONArray("job");
      assertEquals("incorrect number of elements", size - 1, arr.length());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** finishedTimeEnd earlier than finishedTimeBegin must be rejected with 400. */
    @Test
    public void testJobsQueryFinishTimeBeginEndInvalid() throws JSONException, Exception {
      WebResource web = resource();
      Long now = System.currentTimeMillis();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeBegin", String.valueOf(now))
          .queryParam("finishedTimeEnd", String.valueOf(40000))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: finishedTimeEnd must be greater than finishedTimeBegin",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** An unparseable state value must produce an IllegalArgumentException payload. */
    @Test
    public void testJobsQueryStateInvalid() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("state", "InvalidState")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      // Message only needs to contain the enum constant reference.
      WebServicesTestUtils.checkStringContains("exception message",
          "org.apache.hadoop.mapreduce.v2.api.records.JobState.InvalidState",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "IllegalArgumentException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "java.lang.IllegalArgumentException", err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A negative startedTimeEnd must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryStartTimeEndNegative() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeEnd", String.valueOf(-1000))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: startedTimeEnd must be greater than 0",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Filters jobs by user "mock" and verifies the three matching jobs plus the
     * partial-job payload of the first entry.
     */
    @Test
    public void testJobsQueryUser() throws JSONException, Exception {
      WebResource r = resource();
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").queryParam("user", "mock")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      // Leftover debug System.out.println of the response body removed.
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject jobs = json.getJSONObject("jobs");
      JSONArray arr = jobs.getJSONArray("job");
      assertEquals("incorrect number of elements", 3, arr.length());
      JSONObject info = arr.getJSONObject(0);
      Job job = appContext.getPartialJob(MRApps.toJobID(info.getString("id")));
      VerifyJobsUtils.verifyHsJobPartial(info, job);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Queries by the state of an arbitrary known job and verifies the single
     * matching partial-job payload.
     */
    @Test
    public void testJobsQueryState() throws JSONException, Exception {
      WebResource r = resource();
      // Generic types restored: raw Map.Entry broke getID()/getState() below.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      String queryState = "BOGUS";
      JobId jid = null;
      // Pick the first job's state/id as the query target.
      for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
        jid = entry.getValue().getID();
        queryState = entry.getValue().getState().toString();
        break;
      }
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").queryParam("state", queryState)
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject jobs = json.getJSONObject("jobs");
      JSONArray arr = jobs.getJSONArray("job");
      assertEquals("incorrect number of elements", 1, arr.length());
      JSONObject info = arr.getJSONObject(0);
      Job job = appContext.getPartialJob(jid);
      VerifyJobsUtils.verifyHsJobPartial(info, job);
    }

    InternalCallVerifier EqualityVerifier 
    /** A negative finishedTimeBegin must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryFinishTimeBeginNegative() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeBegin", String.valueOf(-1000))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: finishedTimeBegin must be greater than 0",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A non-positive limit must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryLimitInvalid() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("limit", "-1")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      // "greater then" typo is the server's exact wording; must match verbatim.
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: limit value must be greater then 0",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** startedTimeBegin set to "now" should match no jobs, yielding a null entry. */
    @Test
    public void testJobsQueryStartTimeBegin() throws JSONException, Exception {
      WebResource web = resource();
      Long now = System.currentTimeMillis();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeBegin", String.valueOf(now))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
    }

    InternalCallVerifier EqualityVerifier 
    /** A negative startedTimeBegin must be rejected with 400 Bad Request. */
    @Test
    public void testJobsQueryStartTimeNegative() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeBegin", String.valueOf(-1000))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: startedTimeBegin must be greater than 0",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Queries by a JobState no current job is in and expects a null "jobs" entry.
     */
    @Test
    public void testJobsQueryStateNone() throws JSONException, Exception {
      WebResource r = resource();
      // Generic types restored: raw collections broke get(0) -> JobState and getState().
      ArrayList<JobState> JOB_STATES =
          new ArrayList<JobState>(Arrays.asList(JobState.values()));
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      // Remove every state that is actually in use, leaving unused states only.
      for (Map.Entry<JobId, Job> entry : jobsMap.entrySet()) {
        JOB_STATES.remove(entry.getValue().getState());
      }
      assertTrue("No unused job states", JOB_STATES.size() > 0);
      JobState notInUse = JOB_STATES.get(0);
      ClientResponse response = r.path("ws").path("v1").path("history")
          .path("mapreduce").path("jobs").queryParam("state", notInUse.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** finishedTimeEnd set to "now" should match no jobs, yielding a null entry. */
    @Test
    public void testJobsQueryFinishTimeEnd() throws JSONException, Exception {
      WebResource web = resource();
      Long now = System.currentTimeMillis();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("finishedTimeEnd", String.valueOf(now))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("jobs is not null", JSONObject.NULL, body.get("jobs"));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** startedTimeEnd earlier than startedTimeBegin must be rejected with 400. */
    @Test
    public void testJobsQueryStartTimeBeginEndInvalid() throws JSONException, Exception {
      WebResource web = resource();
      Long now = System.currentTimeMillis();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("startedTimeBegin", String.valueOf(now))
          .queryParam("startedTimeEnd", String.valueOf(40000))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject err = resp.getEntity(JSONObject.class).getJSONObject("RemoteException");
      assertEquals("incorrect number of elements", 3, err.length());
      WebServicesTestUtils.checkStringMatch("exception message",
          "java.lang.Exception: startedTimeEnd must be greater than startTimeBegin",
          err.getString("message"));
      WebServicesTestUtils.checkStringMatch("exception type",
          "BadRequestException", err.getString("exception"));
      WebServicesTestUtils.checkStringMatch("exception classname",
          "org.apache.hadoop.yarn.webapp.BadRequestException",
          err.getString("javaClassName"));
    }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by queue "mockqueue" should match all three jobs. */
    @Test
    public void testJobsQueryQueue() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("history").path("mapreduce")
          .path("jobs").queryParam("queue", "mockqueue")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONArray arr = body.getJSONObject("jobs").getJSONArray("job");
      assertEquals("incorrect number of elements", 3, arr.length());
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestHsWebServicesTasks

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A task id with a bad TaskType marker ("d") must yield a 404 with a
     * descriptive NotFoundException payload.
     */
    @Test
    public void testTaskIdInvalid() throws JSONException, Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "task_0_0000_d_000000";
        try {
          r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
              .path(jobId).path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: Bad TaskType identifier. TaskId string : "
                  + "task_0_0000_d_000000 is not properly formed.", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the JSON task-counters payload for every task of every job.
     */
    @Test
    public void testTaskIdCounters() throws JSONException, Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .path("counters").accept(MediaType.APPLICATION_JSON)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("jobTaskCounters");
          verifyHsJobTaskCounters(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Same as testTaskIdCounters but with a trailing slash on the counters path.
     */
    @Test
    public void testTaskIdCountersSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .path("counters/").accept(MediaType.APPLICATION_JSON)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("jobTaskCounters");
          verifyHsJobTaskCounters(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Filters each job's tasks by type "r" and verifies the single reduce task.
     */
    @Test
    public void testTasksQueryReduce() throws JSONException, Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String type = "r";
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks")
            .queryParam("type", type).accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject tasks = json.getJSONObject("tasks");
        JSONArray arr = tasks.getJSONArray("task");
        assertEquals("incorrect number of elements", 1, arr.length());
        verifyHsTask(arr, jobsMap.get(id), type);
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the XML task-counters payload for every task of every job.
     */
    @Test
    public void testJobTaskCountersXML() throws Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .path("counters").accept(MediaType.APPLICATION_XML)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
          String xml = response.getEntity(String.class);
          DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
          DocumentBuilder db = dbf.newDocumentBuilder();
          InputSource is = new InputSource();
          is.setCharacterStream(new StringReader(xml));
          Document dom = db.parse(is);
          NodeList info = dom.getElementsByTagName("jobTaskCounters");
          verifyHsTaskCountersXML(info, task);
        }
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A well-formed but unknown task id must yield a 404 with a
     * "task not found" NotFoundException payload.
     */
    @Test
    public void testTaskIdNonExist() throws JSONException, Exception {
      WebResource r = resource();
      // Generic type restored: iterating a raw keySet() as JobId does not compile.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "task_0_0000_m_000000";
        try {
          r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
              .path(jobId).path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: task not found with id task_0_0000_m_000000",
              message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the task-counters resource with no explicit Accept header;
     * the default response type must be JSON.
     */
    @Test
    public void testTaskIdCountersDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .path("counters").get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("jobTaskCounters");
          verifyHsJobTaskCounters(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the XML task-list resource: a single {@code <tasks>} wrapper
     * whose {@code <task>} children are checked by the XML verifier.
     */
    @Test
    public void testTasksXML() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks")
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        InputSource is = new InputSource();
        is.setCharacterStream(new StringReader(xml));
        Document dom = db.parse(is);
        NodeList tasks = dom.getElementsByTagName("tasks");
        assertEquals("incorrect number of elements", 1, tasks.getLength());
        NodeList task = dom.getElementsByTagName("task");
        verifyHsTaskXML(task, jobsMap.get(id));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the task-list resource also answers with a trailing slash
     * ("tasks/") and still returns the expected two tasks.
     */
    @Test
    public void testTasksSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject tasks = json.getJSONObject("tasks");
        JSONArray arr = tasks.getJSONArray("task");
        assertEquals("incorrect number of elements", 2, arr.length());
        verifyHsTask(arr, jobsMap.get(id), null);
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requests a task id that is missing its trailing task number
     * ("task_0_0000_m") and expects a 404 with a malformed-id message.
     */
    @Test
    public void testTaskIdInvalid3() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "task_0_0000_m";
        try {
          r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
              .path(jobId).path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: TaskId string : "
                  + "task_0_0000_m is not properly formed", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the single-task JSON resource for every task of every job,
     * requesting JSON explicitly.
     */
    @Test
    public void testTaskId() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("task");
          verifyHsSingleTask(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the single-task resource with no explicit Accept header;
     * the default response type must be JSON.
     */
    @Test
    public void testTaskIdDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("task");
          verifyHsSingleTask(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the task-list resource with no explicit Accept header;
     * the default response type must be JSON and contain two tasks.
     */
    @Test
    public void testTasksDefault() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks")
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject tasks = json.getJSONObject("tasks");
        JSONArray arr = tasks.getJSONArray("task");
        assertEquals("incorrect number of elements", 2, arr.length());
        verifyHsTask(arr, jobsMap.get(id), null);
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requests a task id that is missing its job-id component
     * ("task_0000_m_000000") and expects a 404 with a malformed-id message.
     */
    @Test
    public void testTaskIdInvalid2() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "task_0000_m_000000";
        try {
          r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
              .path(jobId).path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: TaskId string : "
                  + "task_0000_m_000000 is not properly formed", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the task-list resource requesting JSON explicitly; the job
     * must report exactly two tasks.
     */
    @Test
    public void testTasks() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject tasks = json.getJSONObject("tasks");
        JSONArray arr = tasks.getJSONArray("task");
        assertEquals("incorrect number of elements", 2, arr.length());
        verifyHsTask(arr, jobsMap.get(id), null);
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the single-task resource also answers when the task id
     * carries a trailing slash.
     */
    @Test
    public void testTaskIdSlash() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks")
              .path(tid + "/").accept(MediaType.APPLICATION_JSON)
              .get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject json = response.getEntity(JSONObject.class);
          assertEquals("incorrect number of elements", 1, json.length());
          JSONObject info = json.getJSONObject("task");
          verifyHsSingleTask(info, task);
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies filtering the task list with {@code ?type=m}; only the single
     * map task should come back.
     */
    @Test
    public void testTasksQueryMap() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String type = "m";
        ClientResponse response = r.path("ws").path("v1").path("history")
            .path("mapreduce").path("jobs").path(jobId).path("tasks")
            .queryParam("type", type).accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject tasks = json.getJSONObject("tasks");
        JSONArray arr = tasks.getJSONArray("task");
        assertEquals("incorrect number of elements", 1, arr.length());
        verifyHsTask(arr, jobsMap.get(id), type);
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the XML representation of each single task; every {@code <task>}
     * element in the payload is checked individually.
     */
    @Test
    public void testTaskIdXML() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        for (Task task : jobsMap.get(id).getTasks().values()) {
          String tid = MRApps.toString(task.getID());
          ClientResponse response = r.path("ws").path("v1").path("history")
              .path("mapreduce").path("jobs").path(jobId).path("tasks").path(tid)
              .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
          assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
          String xml = response.getEntity(String.class);
          DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
          DocumentBuilder db = dbf.newDocumentBuilder();
          InputSource is = new InputSource();
          is.setCharacterStream(new StringReader(xml));
          Document dom = db.parse(is);
          NodeList nodes = dom.getElementsByTagName("task");
          for (int i = 0; i < nodes.getLength(); i++) {
            Element element = (Element) nodes.item(i);
            verifyHsSingleTaskXML(element, task);
          }
        }
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requests a completely malformed task id ("bogustaskid") and expects a
     * 404 with a malformed-id message.
     */
    @Test
    public void testTaskIdBogus() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tid = "bogustaskid";
        try {
          r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
              .path(jobId).path("tasks").path(tid).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: TaskId string : "
                  + "bogustaskid is not properly formed", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "NotFoundException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
        }
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Filters the task list with an invalid {@code ?type=reduce} value (only
     * "m" or "r" are accepted) and expects a 400 BadRequestException.
     */
    @Test
    public void testTasksQueryInvalid() throws JSONException, Exception {
      WebResource r = resource();
      // Generic parameters restored; a raw Map cannot be iterated with JobId keys.
      Map<JobId, Job> jobsMap = appContext.getAllJobs();
      for (JobId id : jobsMap.keySet()) {
        String jobId = MRApps.toString(id);
        String tasktype = "reduce";
        try {
          r.path("ws").path("v1").path("history").path("mapreduce").path("jobs")
              .path(jobId).path("tasks").queryParam("type", tasktype)
              .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
          fail("should have thrown exception on invalid uri");
        } catch (UniformInterfaceException ue) {
          ClientResponse response = ue.getResponse();
          assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
          JSONObject msg = response.getEntity(JSONObject.class);
          JSONObject exception = msg.getJSONObject("RemoteException");
          assertEquals("incorrect number of elements", 3, exception.length());
          String message = exception.getString("message");
          String type = exception.getString("exception");
          String classname = exception.getString("javaClassName");
          WebServicesTestUtils.checkStringMatch("exception message",
              "java.lang.Exception: tasktype must be either m or r", message);
          WebServicesTestUtils.checkStringMatch("exception type",
              "BadRequestException", type);
          WebServicesTestUtils.checkStringMatch("exception classname",
              "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
        }
      }
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.TestMapReduceTrackingUriPlugin

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the tracking-URI plugin maps an application id to the
     * job-history server's job page, deriving the job id from the app id.
     */
    @Test
    public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
      final String historyAddress = "example.net:424242";
      YarnConfiguration conf = new YarnConfiguration();
      conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, historyAddress);
      MapReduceTrackingUriPlugin plugin = new MapReduceTrackingUriPlugin();
      plugin.setConf(conf);
      // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit 1.
      ApplicationId id = ApplicationId.newInstance(6384623L, 5);
      // job ids differ from application ids only by prefix.
      String jobSuffix = id.toString().replaceFirst("^application_", "job_");
      URI expected =
          new URI("http://" + historyAddress + "/jobhistory/job/" + jobSuffix);
      URI actual = plugin.getTrackingUri(id);
      assertEquals(expected, actual);
    }

    Class: org.apache.hadoop.mapreduce.v2.hs.webapp.dao.TestJobInfo

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies JobInfo's average reduce time over two mocked successful reduce
     * attempts: ((8-6) + (42-22)) / 2 = 11, i.e. the time from sort finish to
     * attempt finish, averaged across attempts.
     */
    @Test
    public void testAverageReduceTime() {
      Job job = mock(CompletedJob.class);
      final Task task1 = mock(Task.class);
      final Task task2 = mock(Task.class);
      JobId jobId = MRBuilderUtils.newJobId(1L, 1, 1);
      final TaskId taskId1 = MRBuilderUtils.newTaskId(jobId, 1, TaskType.REDUCE);
      final TaskId taskId2 = MRBuilderUtils.newTaskId(jobId, 2, TaskType.REDUCE);
      final TaskAttemptId taskAttemptId1 =
          MRBuilderUtils.newTaskAttemptId(taskId1, 1);
      final TaskAttemptId taskAttemptId2 =
          MRBuilderUtils.newTaskAttemptId(taskId2, 2);
      final TaskAttempt taskAttempt1 = mock(TaskAttempt.class);
      final TaskAttempt taskAttempt2 = mock(TaskAttempt.class);
      JobReport jobReport = mock(JobReport.class);
      when(taskAttempt1.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
      when(taskAttempt1.getLaunchTime()).thenReturn(0L);
      when(taskAttempt1.getShuffleFinishTime()).thenReturn(4L);
      when(taskAttempt1.getSortFinishTime()).thenReturn(6L);
      when(taskAttempt1.getFinishTime()).thenReturn(8L);
      when(taskAttempt2.getState()).thenReturn(TaskAttemptState.SUCCEEDED);
      when(taskAttempt2.getLaunchTime()).thenReturn(5L);
      when(taskAttempt2.getShuffleFinishTime()).thenReturn(10L);
      when(taskAttempt2.getSortFinishTime()).thenReturn(22L);
      when(taskAttempt2.getFinishTime()).thenReturn(42L);
      when(task1.getType()).thenReturn(TaskType.REDUCE);
      when(task2.getType()).thenReturn(TaskType.REDUCE);
      // Generic parameters restored on the anonymous map subclasses; raw
      // HashMap would return untyped maps to the typed mock stubs.
      when(task1.getAttempts()).thenReturn(
          new HashMap<TaskAttemptId, TaskAttempt>() {
            {
              put(taskAttemptId1, taskAttempt1);
            }
          });
      when(task2.getAttempts()).thenReturn(
          new HashMap<TaskAttemptId, TaskAttempt>() {
            {
              put(taskAttemptId2, taskAttempt2);
            }
          });
      when(job.getTasks()).thenReturn(
          new HashMap<TaskId, Task>() {
            {
              put(taskId1, task1);
              put(taskId2, task2);
            }
          });
      when(job.getID()).thenReturn(jobId);
      when(job.getReport()).thenReturn(jobReport);
      when(job.getName()).thenReturn("TestJobInfo");
      when(job.getState()).thenReturn(JobState.SUCCEEDED);
      JobInfo jobInfo = new JobInfo(job);
      Assert.assertEquals(11L, jobInfo.getAvgReduceTime().longValue());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Loads a real job-history file from test resources through CompletedJob
     * and verifies the computed average merge time.
     */
    @Test(timeout = 10000)
    public void testAverageMergeTime() throws IOException {
      String historyFileName =
          "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
      String confFileName = "job_1329348432655_0001_conf.xml";
      Configuration conf = new Configuration();
      JobACLsManager jobAclsMgr = new JobACLsManager(conf);
      // Typo fixed: "fulleHistoryPath" -> "fullHistoryPath".
      Path fullHistoryPath = new Path(TestJobHistoryEntities.class
          .getClassLoader().getResource(historyFileName).getFile());
      Path fullConfPath = new Path(TestJobHistoryEntities.class
          .getClassLoader().getResource(confFileName).getFile());
      HistoryFileInfo info = mock(HistoryFileInfo.class);
      when(info.getConfFile()).thenReturn(fullConfPath);
      // Uppercase 'L' suffix: lowercase 'l' is easily misread as the digit 1.
      JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
      CompletedJob completedJob = new CompletedJob(conf, jobId, fullHistoryPath,
          true, "user", info, jobAclsMgr);
      JobInfo jobInfo = new JobInfo(completedJob);
      Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
    }

    Class: org.apache.hadoop.mapreduce.v2.jobhistory.TestFileNameIndexUtils

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Decodes a history file name in the old (pre-queue-name) format and
     * checks every parsed field; the queue name must come back null.
     */
    @Test
    public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
      JobID oldJobId = JobID.forName(JOB_ID);
      JobId jobId = TypeConverter.toYarn(oldJobId);
      // Expected values, parsed from the same constants the file name encodes.
      long submitTime = Long.parseLong(SUBMIT_TIME);
      long finishTime = Long.parseLong(FINISH_TIME);
      int numMaps = Integer.parseInt(NUM_MAPS);
      int numReduces = Integer.parseInt(NUM_REDUCES);
      String jobHistoryFile = String.format(OLD_JOB_HISTORY_FILE_FORMATTER,
          JOB_ID, SUBMIT_TIME, USER_NAME, JOB_NAME, FINISH_TIME, NUM_MAPS,
          NUM_REDUCES, JOB_STATUS);
      JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
      Assert.assertEquals("Job id incorrect after decoding old history file",
          jobId, info.getJobId());
      Assert.assertEquals("Submit time incorrect after decoding old history file",
          submitTime, info.getSubmitTime());
      Assert.assertEquals("User incorrect after decoding old history file",
          USER_NAME, info.getUser());
      Assert.assertEquals("Job name incorrect after decoding old history file",
          JOB_NAME, info.getJobName());
      Assert.assertEquals("Finish time incorrect after decoding old history file",
          finishTime, info.getFinishTime());
      Assert.assertEquals("Num maps incorrect after decoding old history file",
          numMaps, info.getNumMaps());
      Assert.assertEquals("Num reduces incorrect after decoding old history file",
          numReduces, info.getNumReduces());
      Assert.assertEquals("Job status incorrect after decoding old history file",
          JOB_STATUS, info.getJobStatus());
      Assert.assertNull("Queue name incorrect after decoding old history file",
          info.getQueueName());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * A percent-escaped delimiter inside the user name must be decoded back
     * to the delimiter character when the file name is parsed.
     */
    @Test
    public void testUserNamePercentDecoding() throws IOException {
      String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID,
          SUBMIT_TIME, USER_NAME_WITH_DELIMITER_ESCAPE, JOB_NAME, FINISH_TIME,
          NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME, JOB_START_TIME);
      JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
      Assert.assertEquals("User name doesn't match",
          USER_NAME_WITH_DELIMITER, info.getUser());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Round-trip check: a fully-populated JobIndexInfo encoded into a done-file
     * name and decoded again must reproduce every field.
     */
    @Test
    public void testEncodingDecodingEquivalence() throws IOException {
      JobIndexInfo info = new JobIndexInfo();
      JobID oldJobId = JobID.forName(JOB_ID);
      JobId jobId = TypeConverter.toYarn(oldJobId);
      // Populate all fields from the shared test constants.
      info.setJobId(jobId);
      info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
      info.setUser(USER_NAME);
      info.setJobName(JOB_NAME);
      info.setFinishTime(Long.parseLong(FINISH_TIME));
      info.setNumMaps(Integer.parseInt(NUM_MAPS));
      info.setNumReduces(Integer.parseInt(NUM_REDUCES));
      info.setJobStatus(JOB_STATUS);
      info.setQueueName(QUEUE_NAME);
      info.setJobStartTime(Long.parseLong(JOB_START_TIME));
      // Encode, then decode, and compare field by field.
      String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
      JobIndexInfo parsedInfo = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
      Assert.assertEquals("Job id different after encoding and decoding",
          info.getJobId(), parsedInfo.getJobId());
      Assert.assertEquals("Submit time different after encoding and decoding",
          info.getSubmitTime(), parsedInfo.getSubmitTime());
      Assert.assertEquals("User different after encoding and decoding",
          info.getUser(), parsedInfo.getUser());
      Assert.assertEquals("Job name different after encoding and decoding",
          info.getJobName(), parsedInfo.getJobName());
      Assert.assertEquals("Finish time different after encoding and decoding",
          info.getFinishTime(), parsedInfo.getFinishTime());
      Assert.assertEquals("Num maps different after encoding and decoding",
          info.getNumMaps(), parsedInfo.getNumMaps());
      Assert.assertEquals("Num reduces different after encoding and decoding",
          info.getNumReduces(), parsedInfo.getNumReduces());
      Assert.assertEquals("Job status different after encoding and decoding",
          info.getJobStatus(), parsedInfo.getJobStatus());
      Assert.assertEquals("Queue name different after encoding and decoding",
          info.getQueueName(), parsedInfo.getQueueName());
      Assert.assertEquals("Job start time different after encoding and decoding",
          info.getJobStartTime(), parsedInfo.getJobStartTime());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * A percent-escaped delimiter inside the job name must be decoded back
     * to the delimiter character when the file name is parsed.
     */
    @Test
    public void testJobNamePercentDecoding() throws IOException {
      String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID,
          SUBMIT_TIME, USER_NAME, JOB_NAME_WITH_DELIMITER_ESCAPE, FINISH_TIME,
          NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME, JOB_START_TIME);
      JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
      Assert.assertEquals("Job name doesn't match",
          JOB_NAME_WITH_DELIMITER, info.getJobName());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * For history file names predating the start-time field, the decoded start
     * time must fall back to the submit time.
     */
    @Test
    public void testJobStartTimeBackwardsCompatible() throws IOException {
      String jobHistoryFile = String.format(OLD_FORMAT_BEFORE_ADD_START_TIME,
          JOB_ID, SUBMIT_TIME, USER_NAME, JOB_NAME_WITH_DELIMITER_ESCAPE,
          FINISH_TIME, NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME);
      JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
      // JUnit convention: expected value first. The fallback value (submit
      // time) is what we expect the parsed start time to equal.
      Assert.assertEquals(info.getSubmitTime(), info.getJobStartTime());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * A percent-escaped delimiter inside the queue name must be decoded back
     * to the delimiter character when the file name is parsed.
     */
    @Test
    public void testQueueNamePercentDecoding() throws IOException {
      String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER, JOB_ID,
          SUBMIT_TIME, USER_NAME, JOB_NAME, FINISH_TIME, NUM_MAPS, NUM_REDUCES,
          JOB_STATUS, QUEUE_NAME_WITH_DELIMITER_ESCAPE, JOB_START_TIME);
      JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
      Assert.assertEquals("Queue name doesn't match",
          QUEUE_NAME_WITH_DELIMITER, info.getQueueName());
    }

    Class: org.apache.hadoop.mapreduce.v2.jobhistory.TestJobHistoryUtils

    APIUtilityVerifier EqualityVerifier 
    /**
     * Builds a grid of year/month/day serial directories around a cutoff date
     * (2013-07-21) and verifies that getHistoryDirsForCleaning returns exactly
     * the 14 directories strictly older than the cutoff, in sorted order.
     * Directories on or after the cutoff, and non-numeric path components
     * ("foo"), must be skipped.
     */
    @Test
    @SuppressWarnings("unchecked")
    public void testGetHistoryDirsForCleaning() throws IOException {
      Path pRoot = new Path(TEST_DIR, "org.apache.hadoop.mapreduce.v2.jobhistory."
          + "TestJobHistoryUtils.testGetHistoryDirsForCleaning");
      FileContext fc = FileContext.getFileContext();
      Calendar cCal = Calendar.getInstance();
      int year = 2013;
      int month = 7;
      int day = 21;
      // Calendar months are 0-based, hence month - 1.
      cCal.set(year, month - 1, day, 1, 0);
      long cutoff = cCal.getTimeInMillis();
      clearDir(fc, pRoot);
      // Directories at, before, and after the cutoff along each of the
      // year/month/day axes. Only those strictly before the cutoff day are
      // eligible for cleaning; the rest exist to prove they are skipped.
      Path pId00 = createPath(fc, pRoot, year, month, day, "000000");
      Path pId01 = createPath(fc, pRoot, year, month, day + 1, "000001");
      Path pId02 = createPath(fc, pRoot, year, month, day - 1, "000002");
      Path pId03 = createPath(fc, pRoot, year, month + 1, day, "000003");
      Path pId04 = createPath(fc, pRoot, year, month + 1, day + 1, "000004");
      Path pId05 = createPath(fc, pRoot, year, month + 1, day - 1, "000005");
      Path pId06 = createPath(fc, pRoot, year, month - 1, day, "000006");
      Path pId07 = createPath(fc, pRoot, year, month - 1, day + 1, "000007");
      Path pId08 = createPath(fc, pRoot, year, month - 1, day - 1, "000008");
      Path pId09 = createPath(fc, pRoot, year + 1, month, day, "000009");
      Path pId10 = createPath(fc, pRoot, year + 1, month, day + 1, "000010");
      Path pId11 = createPath(fc, pRoot, year + 1, month, day - 1, "000011");
      Path pId12 = createPath(fc, pRoot, year + 1, month + 1, day, "000012");
      Path pId13 = createPath(fc, pRoot, year + 1, month + 1, day + 1, "000013");
      Path pId14 = createPath(fc, pRoot, year + 1, month + 1, day - 1, "000014");
      Path pId15 = createPath(fc, pRoot, year + 1, month - 1, day, "000015");
      Path pId16 = createPath(fc, pRoot, year + 1, month - 1, day + 1, "000016");
      Path pId17 = createPath(fc, pRoot, year + 1, month - 1, day - 1, "000017");
      Path pId18 = createPath(fc, pRoot, year - 1, month, day, "000018");
      Path pId19 = createPath(fc, pRoot, year - 1, month, day + 1, "000019");
      Path pId20 = createPath(fc, pRoot, year - 1, month, day - 1, "000020");
      Path pId21 = createPath(fc, pRoot, year - 1, month + 1, day, "000021");
      Path pId22 = createPath(fc, pRoot, year - 1, month + 1, day + 1, "000022");
      Path pId23 = createPath(fc, pRoot, year - 1, month + 1, day - 1, "000023");
      Path pId24 = createPath(fc, pRoot, year - 1, month - 1, day, "000024");
      Path pId25 = createPath(fc, pRoot, year - 1, month - 1, day + 1, "000025");
      Path pId26 = createPath(fc, pRoot, year - 1, month - 1, day - 1, "000026");
      // Non-numeric components must be ignored by the directory scan.
      Path pId27 = createPath(fc, pRoot, "foo", "" + month, "" + day, "000027");
      Path pId28 = createPath(fc, pRoot, "" + year, "foo", "" + day, "000028");
      Path pId29 = createPath(fc, pRoot, "" + year, "" + month, "foo", "000029");
      // Generic parameter restored; raw List elements could not be used as
      // FileStatus below.
      List<FileStatus> dirs =
          JobHistoryUtils.getHistoryDirsForCleaning(fc, pRoot, cutoff);
      Collections.sort(dirs);
      Assert.assertEquals(14, dirs.size());
      Assert.assertEquals(pId26.toUri().getPath(),
          dirs.get(0).getPath().toUri().getPath());
      Assert.assertEquals(pId24.toUri().getPath(),
          dirs.get(1).getPath().toUri().getPath());
      Assert.assertEquals(pId25.toUri().getPath(),
          dirs.get(2).getPath().toUri().getPath());
      Assert.assertEquals(pId20.toUri().getPath(),
          dirs.get(3).getPath().toUri().getPath());
      Assert.assertEquals(pId18.toUri().getPath(),
          dirs.get(4).getPath().toUri().getPath());
      Assert.assertEquals(pId19.toUri().getPath(),
          dirs.get(5).getPath().toUri().getPath());
      Assert.assertEquals(pId23.toUri().getPath(),
          dirs.get(6).getPath().toUri().getPath());
      Assert.assertEquals(pId21.toUri().getPath(),
          dirs.get(7).getPath().toUri().getPath());
      Assert.assertEquals(pId22.toUri().getPath(),
          dirs.get(8).getPath().toUri().getPath());
      Assert.assertEquals(pId08.toUri().getPath(),
          dirs.get(9).getPath().toUri().getPath());
      Assert.assertEquals(pId06.toUri().getPath(),
          dirs.get(10).getPath().toUri().getPath());
      Assert.assertEquals(pId07.toUri().getPath(),
          dirs.get(11).getPath().toUri().getPath());
      Assert.assertEquals(pId02.toUri().getPath(),
          dirs.get(12).getPath().toUri().getPath());
      Assert.assertEquals(pId00.toUri().getPath(),
          dirs.get(13).getPath().toUri().getPath());
    }

    Class: org.apache.hadoop.mapreduce.v2.util.TestMRApps

    InternalCallVerifier EqualityVerifier 
    /**
     * Parses task-id strings and checks every decoded component; the
     * "_r_"/"_m_" marker must map to REDUCE/MAP respectively.
     */
    @Test(timeout = 120000)
    public void testToTaskID() {
      TaskId tid = MRApps.toTaskID("task_1_2_r_3");
      assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp());
      assertEquals(2, tid.getJobId().getAppId().getId());
      assertEquals(2, tid.getJobId().getId());
      assertEquals(TaskType.REDUCE, tid.getTaskType());
      assertEquals(3, tid.getId());
      // Same string with the map marker must yield a MAP task type.
      tid = MRApps.toTaskID("task_1_2_m_3");
      assertEquals(TaskType.MAP, tid.getTaskType());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Parses a job-id string and checks the decoded cluster timestamp,
     * application id, and job sequence number.
     */
    @Test(timeout = 120000)
    public void testToJobID() {
      JobId jid = MRApps.toJobID("job_1_1");
      assertEquals(1, jid.getAppId().getClusterTimestamp());
      assertEquals(1, jid.getAppId().getId());
      assertEquals(1, jid.getId());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The job file path must embed the staging dir, the requesting user, and
     * the job id in the expected layout.
     */
    @Test(timeout = 120000)
    public void testGetJobFileWithUser() {
      Configuration conf = new Configuration();
      conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
      String jobFile =
          MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
      assertNotNull("getJobFile results in null.", jobFile);
      assertEquals("jobFile with specified user is not as expected.",
          "/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml",
          jobFile);
    }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With the job classloader enabled, job.jar entries must move from the
     * system CLASSPATH into APP_CLASSPATH.
     */
    @Test(timeout = 120000)
    public void testSetClasspathWithJobClassloader() throws IOException {
      Configuration conf = new Configuration();
      conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
      conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
      // Generic parameters restored; env.get() below must yield String.
      Map<String, String> env = new HashMap<String, String>();
      MRApps.setClasspath(env, conf);
      String cp = env.get("CLASSPATH");
      String appCp = env.get("APP_CLASSPATH");
      assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the"
          + " classpath!",
          cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
      assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
          cp.contains("PWD"));
      String expectedAppClasspath = StringUtils.join(
          ApplicationConstants.CLASS_PATH_SEPARATOR,
          Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
              "job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
              ApplicationConstants.Environment.PWD.$$() + "/*"));
      assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
          + " classpath!", expectedAppClasspath, appCp);
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Registers one cache archive and one cache file on a mocked file system
     * and verifies both are turned into correctly-typed LocalResources with
     * the configured sizes and timestamps.
     */
    @SuppressWarnings("deprecation")
    @Test(timeout = 30000)
    public void testSetupDistributedCache() throws Exception {
      Configuration conf = new Configuration();
      conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
      URI mockUri = URI.create("mockfs://mock/");
      FileSystem mockFs =
          ((FilterFileSystem) FileSystem.get(mockUri, conf)).getRawFileSystem();
      URI archive = new URI("mockfs://mock/tmp/something.zip");
      Path archivePath = new Path(archive);
      // The fragment ("#something") names the link the file is localized as.
      URI file = new URI("mockfs://mock/tmp/something.txt#something");
      Path filePath = new Path(file);
      when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
      when(mockFs.resolvePath(filePath)).thenReturn(filePath);
      DistributedCache.addCacheArchive(archive, conf);
      conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, "10");
      conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES, "10");
      conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, "true");
      DistributedCache.addCacheFile(file, conf);
      conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "11");
      conf.set(MRJobConfig.CACHE_FILES_SIZES, "11");
      conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true");
      // Generic parameters restored; get() below must yield LocalResource.
      Map<String, LocalResource> localResources =
          new HashMap<String, LocalResource>();
      MRApps.setupDistributedCache(conf, localResources);
      assertEquals(2, localResources.size());
      LocalResource lr = localResources.get("something.zip");
      assertNotNull(lr);
      // Uppercase 'L' suffixes: lowercase 'l' is easily misread as the digit 1.
      assertEquals(10L, lr.getSize());
      assertEquals(10L, lr.getTimestamp());
      assertEquals(LocalResourceType.ARCHIVE, lr.getType());
      lr = localResources.get("something");
      assertNotNull(lr);
      assertEquals(11L, lr.getSize());
      assertEquals(11L, lr.getTimestamp());
      assertEquals(LocalResourceType.FILE, lr.getType());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the string form of a TaskId for both MAP and REDUCE types.
     * Leftover System.err debug prints and the dead local "type" mutation
     * were removed; they had no bearing on the assertions.
     */
    @Test(timeout = 120000)
    public void testTaskIDtoString() {
      TaskId tid = RecordFactoryProvider.getRecordFactory(null)
          .newRecordInstance(TaskId.class);
      tid.setJobId(RecordFactoryProvider.getRecordFactory(null)
          .newRecordInstance(JobId.class));
      tid.getJobId().setAppId(ApplicationId.newInstance(0, 0));
      tid.setTaskType(TaskType.MAP);
      assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
      tid.setTaskType(TaskType.REDUCE);
      assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * MRApps.toString(TaskAttemptId) should render
     * "attempt_&lt;clusterTimestamp&gt;_&lt;appId&gt;_&lt;type&gt;_&lt;taskId&gt;_&lt;attemptId&gt;"
     * for a freshly built, zero-valued MAP attempt id.
     */
    @Test(timeout=120000)
    public void testTaskAttemptIDtoString() {
        TaskAttemptId attemptId =
            RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
        attemptId.setTaskId(
            RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class));
        attemptId.getTaskId().setTaskType(TaskType.MAP);
        attemptId.getTaskId().setJobId(
            RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
        attemptId.getTaskId().getJobId().setAppId(ApplicationId.newInstance(0, 0));
        assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(attemptId));
    }

    APIUtilityVerifier UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies MRApps.setClasspath behavior when a MapReduce framework path is
     * configured:
     *  1. framework path set WITHOUT a matching classpath entry must throw
     *     IllegalArgumentException naming the framework;
     *  2. with a matching classpath entry, CLASSPATH is PWD + framework +
     *     standard job.jar entries (framework before job classes by default);
     *  3. with MAPREDUCE_JOB_USER_CLASSPATH_FIRST, the standard entries come
     *     before the framework entry.
     *
     * NOTE(review): timeout=3000000 is 50 minutes — presumably a typo for
     * 30000 like the sibling tests; confirm before changing.
     */
    @Test(timeout=3000000) public void testSetClasspathWithFramework() throws IOException { final String FRAMEWORK_NAME="some-framework-name"; final String FRAMEWORK_PATH="some-framework-path#" + FRAMEWORK_NAME; Configuration conf=new Configuration(); conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,true); conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH,FRAMEWORK_PATH); Map env=new HashMap(); try { MRApps.setClasspath(env,conf); fail("Failed to catch framework path set without classpath change"); } catch ( IllegalArgumentException e) { assertTrue("Unexpected IllegalArgumentException",e.getMessage().contains("Could not locate MapReduce framework name '" + FRAMEWORK_NAME + "'")); } env.clear(); final String FRAMEWORK_CLASSPATH=FRAMEWORK_NAME + "/*.jar"; conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,FRAMEWORK_CLASSPATH); MRApps.setClasspath(env,conf); final String stdClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList("job.jar/job.jar","job.jar/classes/","job.jar/lib/*",ApplicationConstants.Environment.PWD.$$() + "/*")); String expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),FRAMEWORK_CLASSPATH,stdClasspath)); assertEquals("Incorrect classpath with framework and no user precedence",expectedClasspath,env.get("CLASSPATH")); env.clear(); conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST,true); MRApps.setClasspath(env,conf); expectedClasspath=StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,Arrays.asList(ApplicationConstants.Environment.PWD.$$(),stdClasspath,FRAMEWORK_CLASSPATH)); assertEquals("Incorrect classpath with framework and user precedence",expectedClasspath,env.get("CLASSPATH")); }

    InternalCallVerifier EqualityVerifier 
    /** MRApps.toString(JobId) should render "job_&lt;clusterTimestamp&gt;_&lt;appId&gt;". */
    @Test(timeout=120000)
    public void testJobIDtoString() {
        JobId jobId =
            RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
        jobId.setAppId(ApplicationId.newInstance(0, 0));
        assertEquals("job_0_0000", MRApps.toString(jobId));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * MRApps.toTaskAttemptID must parse "attempt_&lt;ts&gt;_&lt;app&gt;_&lt;type&gt;_&lt;task&gt;_&lt;attempt&gt;"
     * back into a fully populated TaskAttemptId.
     */
    @Test(timeout=120000)
    public void testToTaskAttemptID() {
        TaskAttemptId parsed = MRApps.toTaskAttemptID("attempt_0_1_m_2_3");
        // cluster timestamp 0, application id 1 (the job id mirrors the app id)
        assertEquals(0, parsed.getTaskId().getJobId().getAppId().getClusterTimestamp());
        assertEquals(1, parsed.getTaskId().getJobId().getAppId().getId());
        assertEquals(1, parsed.getTaskId().getJobId().getId());
        assertEquals(2, parsed.getTaskId().getId());
        assertEquals(3, parsed.getId());
    }

    Class: org.apache.hadoop.metrics2.impl.TestGangliaMetrics

    EqualityVerifier 
    /**
     * Verifies GangliaSink30.appendPrefix honors the per-context
     * "tagsForPrefix" configuration: "*" appends every tag except Hostname,
     * an explicit list appends only the named tags, an empty value appends
     * nothing, and an unconfigured context ("nada") also appends nothing.
     * The Context tag itself is never part of the prefix.
     */
    @Test public void testTagsForPrefix() throws Exception { ConfigBuilder cb=new ConfigBuilder().add("test.sink.ganglia.tagsForPrefix.all","*").add("test.sink.ganglia.tagsForPrefix.some","NumActiveSinks, " + "NumActiveSources").add("test.sink.ganglia.tagsForPrefix.none",""); GangliaSink30 sink=new GangliaSink30(); sink.init(cb.subset("test.sink.ganglia")); List tags=new ArrayList(); tags.add(new MetricsTag(MsInfo.Context,"all")); tags.add(new MetricsTag(MsInfo.NumActiveSources,"foo")); tags.add(new MetricsTag(MsInfo.NumActiveSinks,"bar")); tags.add(new MetricsTag(MsInfo.NumAllSinks,"haa")); tags.add(new MetricsTag(MsInfo.Hostname,"host")); Set metrics=new HashSet(); MetricsRecord record=new MetricsRecordImpl(MsInfo.Context,(long)1,tags,metrics); StringBuilder sb=new StringBuilder(); sink.appendPrefix(record,sb); assertEquals(".NumActiveSources=foo.NumActiveSinks=bar.NumAllSinks=haa",sb.toString()); tags.set(0,new MetricsTag(MsInfo.Context,"some")); sb=new StringBuilder(); sink.appendPrefix(record,sb); assertEquals(".NumActiveSources=foo.NumActiveSinks=bar",sb.toString()); tags.set(0,new MetricsTag(MsInfo.Context,"none")); sb=new StringBuilder(); sink.appendPrefix(record,sb); assertEquals("",sb.toString()); tags.set(0,new MetricsTag(MsInfo.Context,"nada")); sb=new StringBuilder(); sink.appendPrefix(record,sb); assertEquals("",sb.toString()); }

    Class: org.apache.hadoop.metrics2.impl.TestGraphiteMetrics

    InternalCallVerifier EqualityVerifier 
    /**
     * GraphiteSink.putMetrics must emit one line per metric of the form
     * "&lt;prefix&gt;.&lt;context&gt;.&lt;record&gt;.&lt;tags&gt;.&lt;name&gt; &lt;value&gt; &lt;timestamp-secs&gt;\n";
     * the metric Set is unordered so both line orderings are accepted.
     *
     * Fix: an IOException from the mocked writer was only printStackTrace'd,
     * letting the test fall through to argument.getValue() and fail with a
     * misleading secondary error; now it fails immediately with the cause.
     */
    @Test
    public void testPutMetrics() {
        GraphiteSink sink = new GraphiteSink();
        List tags = new ArrayList();
        tags.add(new MetricsTag(MsInfo.Context, "all"));
        tags.add(new MetricsTag(MsInfo.Hostname, "host"));
        Set metrics = new HashSet();
        metrics.add(makeMetric("foo1", 1.25));
        metrics.add(makeMetric("foo2", 2.25));
        MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
        OutputStreamWriter mockWriter = mock(OutputStreamWriter.class);
        ArgumentCaptor argument = ArgumentCaptor.forClass(String.class);
        Whitebox.setInternalState(sink, "writer", mockWriter);
        sink.putMetrics(record);
        try {
            verify(mockWriter).write(argument.capture());
        } catch (IOException e) {
            throw new AssertionError("Expected a write to the mock writer", e);
        }
        String result = argument.getValue().toString();
        assertEquals(true,
            result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"
                + "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n")
            || result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n"
                + "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Same as testPutMetrics but with a null Hostname tag value: null-valued
     * tags must be omitted from the emitted Graphite metric path entirely.
     *
     * Fix: an IOException from the mocked writer was only printStackTrace'd,
     * letting the test fall through to argument.getValue() and fail with a
     * misleading secondary error; now it fails immediately with the cause.
     */
    @Test
    public void testPutMetrics2() {
        GraphiteSink sink = new GraphiteSink();
        List tags = new ArrayList();
        tags.add(new MetricsTag(MsInfo.Context, "all"));
        tags.add(new MetricsTag(MsInfo.Hostname, null));
        Set metrics = new HashSet();
        metrics.add(makeMetric("foo1", 1));
        metrics.add(makeMetric("foo2", 2));
        MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
        OutputStreamWriter mockWriter = mock(OutputStreamWriter.class);
        ArgumentCaptor argument = ArgumentCaptor.forClass(String.class);
        Whitebox.setInternalState(sink, "writer", mockWriter);
        sink.putMetrics(record);
        try {
            verify(mockWriter).write(argument.capture());
        } catch (IOException e) {
            throw new AssertionError("Expected a write to the mock writer", e);
        }
        String result = argument.getValue().toString();
        assertEquals(true,
            result.equals("null.all.Context.Context=all.foo1 1 10\n"
                + "null.all.Context.Context=all.foo2 2 10\n")
            || result.equals("null.all.Context.Context=all.foo2 2 10\n"
                + "null.all.Context.Context=all.foo1 1 10\n"));
    }

    Class: org.apache.hadoop.metrics2.impl.TestMetricsCollectorImpl

    InternalCallVerifier EqualityVerifier 
    /**
     * With a metric filter excluding "foo", a record builder must drop the
     * gauge named "foo" but keep the counter "c0"; the tag named "foo" is NOT
     * filtered (metric filters apply to metrics only, not tags).
     */
    @Test public void testPerMetricFiltering(){ SubsetConfiguration fc=new ConfigBuilder().add("p.exclude","foo").subset("p"); MetricsCollectorImpl mb=new MetricsCollectorImpl(); mb.setMetricFilter(newGlobFilter(fc)); MetricsRecordBuilderImpl rb=mb.addRecord("foo"); rb.tag(info("foo",""),"").addCounter(info("c0",""),0).addGauge(info("foo",""),1); assertEquals("1 tag",1,rb.tags().size()); assertEquals("1 metric",1,rb.metrics().size()); assertEquals("expect foo tag","foo",rb.tags().get(0).name()); assertEquals("expect c0","c0",rb.metrics().get(0).name()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * With a record filter excluding "foo", the builder for record "foo" must
     * be a no-op sink: tag/gauge calls accumulate nothing, getRecord() is
     * null, and the collector produces no records at all.
     */
    @Test public void recordBuilderShouldNoOpIfFiltered(){ SubsetConfiguration fc=new ConfigBuilder().add("p.exclude","foo").subset("p"); MetricsCollectorImpl mb=new MetricsCollectorImpl(); mb.setRecordFilter(newGlobFilter(fc)); MetricsRecordBuilderImpl rb=mb.addRecord("foo"); rb.tag(info("foo",""),"value").addGauge(info("g0",""),1); assertEquals("no tags",0,rb.tags().size()); assertEquals("no metrics",0,rb.metrics().size()); assertNull("null record",rb.getRecord()); assertEquals("no records",0,mb.getRecords().size()); }

    Class: org.apache.hadoop.metrics2.impl.TestMetricsSourceAdapter

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises MetricsSourceAdapter through both access paths: getMetrics()
     * (direct collection) and getAttribute() (JMX view). The counter starts
     * at 0 on both paths; after incrementCnt() both must observe 1.
     *
     * NOTE(review): the Thread.sleep(100) calls before each getAttribute
     * presumably allow the adapter's attribute cache to refresh — confirm
     * against MetricsSourceAdapter's caching before removing them.
     */
    @Test public void testGetMetricsAndJmx() throws Exception { TestSource source=new TestSource("test"); MetricsSourceBuilder sb=MetricsAnnotations.newSourceBuilder(source); final MetricsSource s=sb.build(); List injectedTags=new ArrayList(); MetricsSourceAdapter sa=new MetricsSourceAdapter("test","test","test desc",s,injectedTags,null,null,1,false); MetricsCollectorImpl builder=new MetricsCollectorImpl(); Iterable metricsRecords=sa.getMetrics(builder,true); MetricsRecordImpl metricsRecord=metricsRecords.iterator().next(); assertEquals(0L,metricsRecord.metrics().iterator().next().value().longValue()); Thread.sleep(100); assertEquals(0L,(Number)sa.getAttribute("C1")); source.incrementCnt(); builder=new MetricsCollectorImpl(); metricsRecords=sa.getMetrics(builder,true); metricsRecord=metricsRecords.iterator().next(); assertTrue(metricsRecord.metrics().iterator().hasNext()); Thread.sleep(100); assertEquals(1L,(Number)sa.getAttribute("C1")); }

    Class: org.apache.hadoop.metrics2.impl.TestMetricsSystemImpl

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Stress-tests concurrent publishing: numThreads threads each set their
     * own gauge and call publishMetricsNow() simultaneously; every thread's
     * value must reach the CollectingSink with no dropped publications.
     * Two barriers coordinate the phases: barrier1 starts all publishes
     * together, barrier2 ensures all publishes finish before verification.
     *
     * Fix: safeAwait ignored its CyclicBarrier parameter and always awaited
     * barrier1, so the second rendezvous never synchronized on barrier2 and
     * the post-publish check could race the other threads' publishes.
     */
    @Test public void testMultiThreadedPublish() throws Exception {
        final int numThreads = 10;
        new ConfigBuilder().add("*.period", 80)
            .add("test.sink.collector." + MetricsConfig.QUEUE_CAPACITY_KEY, numThreads)
            .save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test"));
        final MetricsSystemImpl ms = new MetricsSystemImpl("Test");
        ms.start();
        final CollectingSink sink = new CollectingSink(numThreads);
        ms.registerSink("collector", "Collector of values from all threads.", sink);
        final TestSource[] sources = new TestSource[numThreads];
        final Thread[] threads = new Thread[numThreads];
        final String[] results = new String[numThreads];
        final CyclicBarrier barrier1 = new CyclicBarrier(numThreads),
            barrier2 = new CyclicBarrier(numThreads);
        for (int i = 0; i < numThreads; i++) {
            sources[i] = ms.register("threadSource" + i,
                "A source of my threaded goodness.",
                new TestSource("threadSourceRec" + i));
            threads[i] = new Thread(new Runnable() {
                private boolean safeAwait(int mySource, CyclicBarrier barrier) {
                    try {
                        barrier.await(2, TimeUnit.SECONDS);  // was barrier1.await(...)
                    } catch (InterruptedException e) {
                        results[mySource] = "Interrupted";
                        return false;
                    } catch (BrokenBarrierException e) {
                        results[mySource] = "Broken Barrier";
                        return false;
                    } catch (TimeoutException e) {
                        results[mySource] = "Timed out on barrier";
                        return false;
                    }
                    return true;
                }
                @Override public void run() {
                    // thread name carries this thread's source index
                    int mySource = Integer.parseInt(Thread.currentThread().getName());
                    if (sink.collected[mySource].get() != 0L) {
                        results[mySource] = "Someone else collected my metric!";
                        return;
                    }
                    if (!safeAwait(mySource, barrier1)) return;  // start together
                    sources[mySource].g1.set(230);
                    ms.publishMetricsNow();
                    if (!safeAwait(mySource, barrier2)) return;  // wait for all publishes
                    if (sink.collected[mySource].get() != 230L) {
                        results[mySource] = "Metric not collected!";
                        return;
                    }
                    results[mySource] = "Passed";
                }
            }, "" + i);
        }
        for (Thread t : threads) t.start();
        for (Thread t : threads) t.join();
        assertEquals(0L, ms.droppedPubAll.value());
        assertTrue(StringUtils.join("\n", Arrays.asList(results)),
            Iterables.all(Arrays.asList(results), new
                Predicate(){ @Override public boolean apply( @Nullable String input){ return input.equalsIgnoreCase("Passed"); } } ));
        ms.stop();
        ms.shutdown();
    }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Publishes once and stops the metrics system immediately, then verifies
     * each mocked sink received at most 2 putMetrics calls. Because stop can
     * race the publish, the assertions are conditional: whichever sink(s)
     * actually received records must have well-formed ones, and if both did,
     * their captured record lists must match.
     */
    @Test public void testInitFirstVerifyStopInvokedImmediately() throws Exception { DefaultMetricsSystem.shutdown(); new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.*.source.filter.exclude","s0").add("test.source.s1.metric.filter.exclude","X*").add("test.sink.sink1.metric.filter.exclude","Y*").add("test.sink.sink2.metric.filter.exclude","Y*").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); MetricsSystemImpl ms=new MetricsSystemImpl("Test"); ms.start(); ms.register("s0","s0 desc",new TestSource("s0rec")); TestSource s1=ms.register("s1","s1 desc",new TestSource("s1rec")); s1.c1.incr(); s1.xxx.incr(); s1.g1.set(2); s1.yyy.incr(2); s1.s1.add(0); MetricsSink sink1=mock(MetricsSink.class); MetricsSink sink2=mock(MetricsSink.class); ms.registerSink("sink1","sink1 desc",sink1); ms.registerSink("sink2","sink2 desc",sink2); ms.publishMetricsNow(); ms.stop(); ms.shutdown(); verify(sink1,atMost(2)).putMetrics(r1.capture()); List mr1=r1.getAllValues(); verify(sink2,atMost(2)).putMetrics(r2.capture()); List mr2=r2.getAllValues(); if (mr1.size() != 0 && mr2.size() != 0) { checkMetricsRecords(mr1); assertEquals("output",mr1,mr2); } else if (mr1.size() != 0) { checkMetricsRecords(mr1); } else if (mr2.size() != 0) { checkMetricsRecords(mr2); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Same setup as testInitFirstVerifyStopInvokedImmediately, but keeps the
     * system running long enough (Mockito timeout(200)) to require exactly
     * two putMetrics callbacks per sink, then checks both sinks captured
     * identical, well-formed record lists. stop/shutdown happen in a finally
     * so the system is torn down even if verification fails.
     */
    @Test public void testInitFirstVerifyCallBacks() throws Exception { DefaultMetricsSystem.shutdown(); new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.*.source.filter.exclude","s0").add("test.source.s1.metric.filter.exclude","X*").add("test.sink.sink1.metric.filter.exclude","Y*").add("test.sink.sink2.metric.filter.exclude","Y*").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); MetricsSystemImpl ms=new MetricsSystemImpl("Test"); ms.start(); ms.register("s0","s0 desc",new TestSource("s0rec")); TestSource s1=ms.register("s1","s1 desc",new TestSource("s1rec")); s1.c1.incr(); s1.xxx.incr(); s1.g1.set(2); s1.yyy.incr(2); s1.s1.add(0); MetricsSink sink1=mock(MetricsSink.class); MetricsSink sink2=mock(MetricsSink.class); ms.registerSink("sink1","sink1 desc",sink1); ms.registerSink("sink2","sink2 desc",sink2); ms.publishMetricsNow(); try { verify(sink1,timeout(200).times(2)).putMetrics(r1.capture()); verify(sink2,timeout(200).times(2)).putMetrics(r2.capture()); } finally { ms.stop(); ms.shutdown(); } List mr1=r1.getAllValues(); List mr2=r2.getAllValues(); checkMetricsRecords(mr1); assertEquals("output",mr1,mr2); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A sink that hangs must not wedge the metrics system: with retry count 0
     * the publication is dropped (droppedPubAll == 1), the hanging sink is
     * not interrupted while the system runs, IS interrupted on stop/shutdown,
     * and still gets called for subsequent records after its first hang.
     */
    @Test public void testHangingSink(){ new ConfigBuilder().add("*.period",8).add("test.sink.test.class",TestSink.class.getName()).add("test.sink.hanging.retry.delay","1").add("test.sink.hanging.retry.backoff","1.01").add("test.sink.hanging.retry.count","0").save(TestMetricsConfig.getTestFilename("hadoop-metrics2-test")); MetricsSystemImpl ms=new MetricsSystemImpl("Test"); ms.start(); TestSource s=ms.register("s3","s3 desc",new TestSource("s3rec")); s.c1.incr(); HangingSink hanging=new HangingSink(); ms.registerSink("hanging","Hang the sink!",hanging); ms.publishMetricsNow(); assertEquals(1L,ms.droppedPubAll.value()); assertFalse(hanging.getInterrupted()); ms.stop(); ms.shutdown(); assertTrue(hanging.getInterrupted()); assertTrue("The sink didn't get called after its first hang " + "for subsequent records.",hanging.getGotCalledSecondTime()); }

    Class: org.apache.hadoop.metrics2.impl.TestMetricsVisitor

    InternalCallVerifier EqualityVerifier 
    /**
     * Test the common use cases: each AbstractMetric built via MetricsLists
     * must dispatch to the matching MetricsVisitor overload (counter for
     * int/long counters, gauge for int/long/float/double gauges), passing
     * through the metric's name, description and exact value. The c1..g4
     * ArgumentCaptor fields are declared on the test class.
     */
    @Test public void testCommon(){ MetricsVisitor visitor=mock(MetricsVisitor.class); MetricsRegistry registry=new MetricsRegistry("test"); List metrics=MetricsLists.builder("test").addCounter(info("c1","int counter"),1).addCounter(info("c2","long counter"),2L).addGauge(info("g1","int gauge"),5).addGauge(info("g2","long gauge"),6L).addGauge(info("g3","float gauge"),7f).addGauge(info("g4","double gauge"),8d).metrics(); for ( AbstractMetric metric : metrics) { metric.visit(visitor); } verify(visitor).counter(c1.capture(),eq(1)); assertEquals("c1 name","c1",c1.getValue().name()); assertEquals("c1 description","int counter",c1.getValue().description()); verify(visitor).counter(c2.capture(),eq(2L)); assertEquals("c2 name","c2",c2.getValue().name()); assertEquals("c2 description","long counter",c2.getValue().description()); verify(visitor).gauge(g1.capture(),eq(5)); assertEquals("g1 name","g1",g1.getValue().name()); assertEquals("g1 description","int gauge",g1.getValue().description()); verify(visitor).gauge(g2.capture(),eq(6L)); assertEquals("g2 name","g2",g2.getValue().name()); assertEquals("g2 description","long gauge",g2.getValue().description()); verify(visitor).gauge(g3.capture(),eq(7f)); assertEquals("g3 name","g3",g3.getValue().name()); assertEquals("g3 description","float gauge",g3.getValue().description()); verify(visitor).gauge(g4.capture(),eq(8d)); assertEquals("g4 name","g4",g4.getValue().name()); assertEquals("g4 description","double gauge",g4.getValue().description()); }

    Class: org.apache.hadoop.metrics2.impl.TestSinkQueue

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test concurrent consumer access, which is illegal: while a sleeping
     * consumer holds the queue, clear/consume/consumeAll/dequeue from another
     * thread must each throw ConcurrentModificationException (checked by
     * shouldThrowCME), and the queue contents stay intact afterwards.
     * @throws Exception
     */
    @Test public void testConcurrentConsumers() throws Exception { final SinkQueue q=newSleepingConsumerQueue(2,1); assertTrue("should enqueue",q.enqueue(2)); assertEquals("queue back",2,(int)q.back()); assertTrue("should drop",!q.enqueue(3)); shouldThrowCME(new Fun(){ @Override public void run(){ q.clear(); } } ); shouldThrowCME(new Fun(){ @Override public void run() throws Exception { q.consume(null); } } ); shouldThrowCME(new Fun(){ @Override public void run() throws Exception { q.consumeAll(null); } } ); shouldThrowCME(new Fun(){ @Override public void run() throws Exception { q.dequeue(); } } ); assertEquals("queue size",2,q.size()); assertEquals("queue front",1,(int)q.front()); assertEquals("queue back",2,(int)q.back()); }

    InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * Test the consumer throwing exceptions: the exact exception thrown by
     * the consumer must propagate out of consume() (assertSame on identity),
     * and the element must remain in the queue — a failed consume does not
     * dequeue.
     * @throws Exception
     */
    @Test public void testConsumerException() throws Exception { final SinkQueue q=new SinkQueue(1); final RuntimeException ex=new RuntimeException("expected"); q.enqueue(1); try { q.consume(new Consumer(){ @Override public void consume( Integer e){ throw ex; } } ); } catch ( Exception expected) { assertSame("consumer exception",ex,expected); } assertEquals("queue size",1,q.size()); assertEquals("element",1,(int)q.dequeue()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test common use case: enqueue/front/back/dequeue round-trips, consume()
     * delivering the queued element, and front/back returning null once the
     * queue is drained.
     * @throws Exception
     */
    @Test public void testCommon() throws Exception { final SinkQueue q=new SinkQueue(2); q.enqueue(1); assertEquals("queue front",1,(int)q.front()); assertEquals("queue back",1,(int)q.back()); assertEquals("element",1,(int)q.dequeue()); assertTrue("should enqueue",q.enqueue(2)); q.consume(new Consumer(){ @Override public void consume( Integer e){ assertEquals("element",2,(int)e); } } ); assertTrue("should enqueue",q.enqueue(3)); assertEquals("element",3,(int)q.dequeue()); assertEquals("queue size",0,q.size()); assertEquals("queue front",null,q.front()); assertEquals("queue back",null,q.back()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test nonblocking enqueue when queue is full: with capacity 1, a second
     * enqueue returns false (element dropped, not blocked); after dequeuing,
     * the queue accepts and consumes new elements normally.
     * @throws Exception
     */
    @Test public void testFull() throws Exception { final SinkQueue q=new SinkQueue(1); q.enqueue(1); assertTrue("should drop",!q.enqueue(2)); assertEquals("element",1,(int)q.dequeue()); q.enqueue(3); q.consume(new Consumer(){ @Override public void consume( Integer e){ assertEquals("element",3,(int)e); } } ); assertEquals("queue size",0,q.size()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test consumers that take their time: while a sleeping consumer occupies
     * the queue (pre-seeded with 1 and 2), new enqueues are dropped rather
     * than blocking, and the queued contents remain observable via
     * size/front/back.
     * @throws Exception
     */
    @Test public void testHangingConsumer() throws Exception { SinkQueue q=newSleepingConsumerQueue(2,1,2); assertEquals("queue back",2,(int)q.back()); assertTrue("should drop",!q.enqueue(3)); assertEquals("queue size",2,q.size()); assertEquals("queue head",1,(int)q.front()); assertEquals("queue back",2,(int)q.back()); }

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test the consumeAll method: fill the queue to capacity (the next
     * enqueue is dropped), then consumeAll must deliver every element exactly
     * once, in FIFO order — verified by an incrementing expected value and a
     * mocked Runnable invoked once per element.
     * @throws Exception
     */
    @Test public void testConsumeAll() throws Exception { final int capacity=64; final SinkQueue q=new SinkQueue(capacity); for (int i=0; i < capacity; ++i) { assertTrue("should enqueue",q.enqueue(i)); } assertTrue("should not enqueue",!q.enqueue(capacity)); final Runnable trigger=mock(Runnable.class); q.consumeAll(new Consumer(){ private int expected=0; @Override public void consume( Integer e){ assertEquals("element",expected++,(int)e); trigger.run(); } } ); verify(trigger,times(capacity)).run(); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Enqueueing well past capacity leaves the queue exactly full (overflow
     * is dropped); clear() must then empty it completely.
     */
    @Test
    public void testClear() {
        final SinkQueue q = new SinkQueue(128);
        final int attempts = q.capacity() + 97;  // deliberately overflow
        for (int n = 0; n < attempts; ++n) {
            q.enqueue(n);
        }
        assertEquals("queue size", q.capacity(), q.size());
        q.clear();
        assertEquals("queue size", 0, q.size());
    }

    Class: org.apache.hadoop.metrics2.lib.TestMetricsRegistry

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test various factory methods: each newCounter/newGauge/newStat call
     * must register a mutable metric of the matching concrete type under its
     * name (5 in total), and re-registering an existing name ("c1") must
     * raise a MetricsException (checked via expectMetricsException).
     */
    @Test public void testNewMetrics(){ final MetricsRegistry r=new MetricsRegistry("test"); r.newCounter("c1","c1 desc",1); r.newCounter("c2","c2 desc",2L); r.newGauge("g1","g1 desc",3); r.newGauge("g2","g2 desc",4L); r.newStat("s1","s1 desc","ops","time"); assertEquals("num metrics in registry",5,r.metrics().size()); assertTrue("c1 found",r.get("c1") instanceof MutableCounterInt); assertTrue("c2 found",r.get("c2") instanceof MutableCounterLong); assertTrue("g1 found",r.get("g1") instanceof MutableGaugeInt); assertTrue("g2 found",r.get("g2") instanceof MutableGaugeLong); assertTrue("s1 found",r.get("s1") instanceof MutableStat); expectMetricsException("Metric name c1 already exists",new Runnable(){ @Override public void run(){ r.newCounter("c1","test dup",0); } } ); }

    Class: org.apache.hadoop.metrics2.lib.TestUniqNames

    InternalCallVerifier EqualityVerifier 
    /** First use of a name comes back unchanged; a repeat gets a "-1" suffix. */
    @Test
    public void testCommonCases() {
        UniqueNames names = new UniqueNames();
        assertEquals("foo", names.uniqueName("foo"));
        assertEquals("foo-1", names.uniqueName("foo"));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Suffix collisions: when a generated name ("foo-1") was already claimed
     * explicitly, further requests keep appending "-N" parts until unique.
     */
    @Test
    public void testCollisions() {
        UniqueNames names = new UniqueNames();
        names.uniqueName("foo");                          // claims "foo"
        assertEquals("foo-1", names.uniqueName("foo-1")); // explicit claim
        assertEquals("foo-2", names.uniqueName("foo"));   // "foo-1" taken, skip to -2
        assertEquals("foo-1-1", names.uniqueName("foo-1"));
        assertEquals("foo-2-1", names.uniqueName("foo-2"));
    }

    Class: org.apache.hadoop.metrics2.util.TestMetricsCache

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * MetricsCache.get returns null for a record name/tag combination never
     * seen; after update() with a one-metric record, get() on the same tags
     * returns a cached Record holding exactly that metric and value.
     */
    @SuppressWarnings("deprecation") @Test public void testGet(){ MetricsCache cache=new MetricsCache(); assertNull("empty",cache.get("r",Arrays.asList(makeTag("t","t")))); MetricsRecord mr=makeRecord("r",Arrays.asList(makeTag("t","t")),Arrays.asList(makeMetric("m",1))); cache.update(mr); MetricsCache.Record cr=cache.get("r",mr.tags()); LOG.debug("tags=" + mr.tags() + " cr="+ cr); assertNotNull("Got record",cr); assertEquals("contains 1 metric",1,cr.metrics().size()); checkMetricValue("new metric value",cr,"m",1); }

    InternalCallVerifier EqualityVerifier 
    /**
     * MetricsCache.update semantics: same tag set merges metrics (updated
     * values overwrite, unseen metrics are retained, new metrics are added);
     * a different tag set ("tv3") starts a fresh cached record. Tags are only
     * cached when update(..., true) requests it. Also verifies the cache
     * reads name/tags/metrics from the supplied record exactly once each.
     */
    @SuppressWarnings("deprecation") @Test public void testUpdate(){ MetricsCache cache=new MetricsCache(); MetricsRecord mr=makeRecord("r",Arrays.asList(makeTag("t","tv")),Arrays.asList(makeMetric("m",0),makeMetric("m1",1))); MetricsCache.Record cr=cache.update(mr); verify(mr).name(); verify(mr).tags(); verify(mr).metrics(); assertEquals("same record size",cr.metrics().size(),((Collection)mr.metrics()).size()); assertEquals("same metric value",0,cr.getMetric("m")); MetricsRecord mr2=makeRecord("r",Arrays.asList(makeTag("t","tv")),Arrays.asList(makeMetric("m",2),makeMetric("m2",42))); cr=cache.update(mr2); assertEquals("contains 3 metric",3,cr.metrics().size()); checkMetricValue("updated metric value",cr,"m",2); checkMetricValue("old metric value",cr,"m1",1); checkMetricValue("new metric value",cr,"m2",42); MetricsRecord mr3=makeRecord("r",Arrays.asList(makeTag("t","tv3")),Arrays.asList(makeMetric("m3",3))); cr=cache.update(mr3); assertEquals("contains 1 metric",1,cr.metrics().size()); checkMetricValue("updated metric value",cr,"m3",3); assertEquals("no tags",0,cr.tags().size()); cr=cache.update(mr3,true); assertEquals("Got 1 tag",1,cr.tags().size()); assertEquals("Tag value","tv3",cr.getTag("t")); checkMetricValue("Metric value",cr,"m3",3); }

    Class: org.apache.hadoop.metrics2.util.TestSampleQuantiles

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Check that counts and quantile estimates are correctly reset after a
     * call to {@link SampleQuantiles#clear()}: both counters return to zero
     * and no snapshot is available.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), and the reversed order produces
     * misleading failure messages.
     */
    @Test
    public void testClear() throws IOException {
        for (int i = 0; i < 1000; i++) {
            estimator.insert(i);
        }
        estimator.clear();
        assertEquals(0, estimator.getCount());
        assertEquals(0, estimator.getSampleCount());
        assertNull(estimator.snapshot());
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Check that the counts of the number of items in the window and sample
     * are incremented correctly as items are added: both start at zero with
     * no snapshot; after one insert the count is 1, and once a snapshot is
     * taken the sample count is 1 and toString() reports every configured
     * quantile as the single inserted value.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), and the reversed order produces
     * misleading failure messages.
     */
    @Test
    public void testCount() throws IOException {
        assertEquals(0, estimator.getCount());
        assertEquals(0, estimator.getSampleCount());
        assertNull(estimator.snapshot());
        estimator.insert(1337);
        assertEquals(1, estimator.getCount());
        estimator.snapshot();
        assertEquals(1, estimator.getSampleCount());
        assertEquals("50.00 %ile +/- 5.00%: 1337\n" + "75.00 %ile +/- 2.50%: 1337\n" + "90.00 %ile +/- 1.00%: 1337\n"+ "95.00 %ile +/- 0.50%: 1337\n"+ "99.00 %ile +/- 0.10%: 1337",estimator.toString());
    }

    Class: org.apache.hadoop.metrics2.util.TestSampleStat

    InternalCallVerifier EqualityVerifier 
    /**
     * Some simple use cases: a fresh SampleStat reports zero samples/mean/
     * variance/stddev with sentinel min/max; one sample (3) makes mean=min=
     * max=3 with zero variance; samples {3,2,1} give mean 2, variance 1,
     * stddev 1, min 1, max 3; reset() restores the initial state. All
     * floating-point comparisons use the EPSILON tolerance from the class.
     */
    @Test public void testSimple(){ SampleStat stat=new SampleStat(); assertEquals("num samples",0,stat.numSamples()); assertEquals("mean",0.0,stat.mean(),EPSILON); assertEquals("variance",0.0,stat.variance(),EPSILON); assertEquals("stddev",0.0,stat.stddev(),EPSILON); assertEquals("min",SampleStat.MinMax.DEFAULT_MIN_VALUE,stat.min(),EPSILON); assertEquals("max",SampleStat.MinMax.DEFAULT_MAX_VALUE,stat.max(),EPSILON); stat.add(3); assertEquals("num samples",1L,stat.numSamples()); assertEquals("mean",3.0,stat.mean(),EPSILON); assertEquals("variance",0.0,stat.variance(),EPSILON); assertEquals("stddev",0.0,stat.stddev(),EPSILON); assertEquals("min",3.0,stat.min(),EPSILON); assertEquals("max",3.0,stat.max(),EPSILON); stat.add(2).add(1); assertEquals("num samples",3L,stat.numSamples()); assertEquals("mean",2.0,stat.mean(),EPSILON); assertEquals("variance",1.0,stat.variance(),EPSILON); assertEquals("stddev",1.0,stat.stddev(),EPSILON); assertEquals("min",1.0,stat.min(),EPSILON); assertEquals("max",3.0,stat.max(),EPSILON); stat.reset(); assertEquals("num samples",0,stat.numSamples()); assertEquals("mean",0.0,stat.mean(),EPSILON); assertEquals("variance",0.0,stat.variance(),EPSILON); assertEquals("stddev",0.0,stat.stddev(),EPSILON); assertEquals("min",SampleStat.MinMax.DEFAULT_MIN_VALUE,stat.min(),EPSILON); assertEquals("max",SampleStat.MinMax.DEFAULT_MAX_VALUE,stat.max(),EPSILON); }

    Class: org.apache.hadoop.minikdc.TestMiniKdc

    InternalCallVerifier EqualityVerifier 
    /**
     * MiniKdc.createPrincipal with two principals must produce a keytab whose
     * entries cover exactly those principals, realm-qualified.
     *
     * NOTE(review): principals are created as "foo/bar" and "bar/foo" but the
     * expected names use backslashes ("foo\bar@REALM") — presumably the
     * Apache Directory Keytab API escapes the '/' separator this way; confirm
     * against the Keytab entry format before "fixing" either side.
     */
    @Test public void testKeytabGen() throws Exception { MiniKdc kdc=getKdc(); File workDir=getWorkDir(); kdc.createPrincipal(new File(workDir,"keytab"),"foo/bar","bar/foo"); Keytab kt=Keytab.read(new File(workDir,"keytab")); Set principals=new HashSet(); for ( KeytabEntry entry : kt.getEntries()) { principals.add(entry.getPrincipalName()); } Assert.assertEquals(new HashSet(Arrays.asList("foo\\bar@" + kdc.getRealm(),"bar\\foo@" + kdc.getRealm())),principals); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * End-to-end Kerberos login against the MiniKdc: creates a principal and
     * keytab, then performs a JAAS login/logout twice — once with the client
     * configuration and once with the server configuration — asserting after
     * each login that the Subject holds exactly one KerberosPrincipal with
     * the realm-qualified name.
     *
     * NOTE(review): the finally block calls logout() on a context that the
     * happy path has already logged out — verify LoginContext tolerates a
     * second logout, or track login state explicitly.
     */
    @Test public void testKerberosLogin() throws Exception { MiniKdc kdc=getKdc(); File workDir=getWorkDir(); LoginContext loginContext=null; try { String principal="foo"; File keytab=new File(workDir,"foo.keytab"); kdc.createPrincipal(keytab,principal); Set principals=new HashSet(); principals.add(new KerberosPrincipal(principal)); Subject subject=new Subject(false,principals,new HashSet(),new HashSet()); loginContext=new LoginContext("",subject,null,KerberosConfiguration.createClientConfig(principal,keytab)); loginContext.login(); subject=loginContext.getSubject(); Assert.assertEquals(1,subject.getPrincipals().size()); Assert.assertEquals(KerberosPrincipal.class,subject.getPrincipals().iterator().next().getClass()); Assert.assertEquals(principal + "@" + kdc.getRealm(),subject.getPrincipals().iterator().next().getName()); loginContext.logout(); subject=new Subject(false,principals,new HashSet(),new HashSet()); loginContext=new LoginContext("",subject,null,KerberosConfiguration.createServerConfig(principal,keytab)); loginContext.login(); subject=loginContext.getSubject(); Assert.assertEquals(1,subject.getPrincipals().size()); Assert.assertEquals(KerberosPrincipal.class,subject.getPrincipals().iterator().next().getClass()); Assert.assertEquals(principal + "@" + kdc.getRealm(),subject.getPrincipals().iterator().next().getName()); loginContext.logout(); } finally { if (loginContext != null) { loginContext.logout(); } } }

    Class: org.apache.hadoop.net.TestDNS

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Asking DNS for the IPs of a nonexistent interface must raise
     * UnknownHostException with a message naming the bogus interface.
     */
    @Test
    public void testIPsOfUnknownInterface() throws Exception {
        try {
            DNS.getIPs("name-of-an-unknown-interface");
            fail("Got an IP for a bogus interface");
        } catch (UnknownHostException expected) {
            assertEquals("No such interface name-of-an-unknown-interface",
                expected.getMessage());
        }
    }

    APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test that repeated calls to getting the local host are fairly fast, and
     * hence that caching is being used: three consecutive getDefaultHost
     * calls must agree, and the third (timed) call must complete well under
     * 20 seconds — a loose bound chosen to tolerate slow CI resolvers while
     * still catching a completely uncached lookup path.
     * @throws Exception if hostname lookups fail
     */
    @Test public void testGetLocalHostIsFast() throws Exception { String hostname1=DNS.getDefaultHost(DEFAULT); assertNotNull(hostname1); String hostname2=DNS.getDefaultHost(DEFAULT); long t1=Time.now(); String hostname3=DNS.getDefaultHost(DEFAULT); long t2=Time.now(); assertEquals(hostname3,hostname2); assertEquals(hostname2,hostname1); long interval=t2 - t1; assertTrue("Took too long to determine local host - caching is not working",interval < 20000); }

    APIUtilityVerifier EqualityVerifier 
    /**
     * DNS.getIPs(DEFAULT) must return exactly one address — the local IP —
     * and DNS.getDefaultIP(DEFAULT) must agree with it.
     */
    @Test
    public void testGetIPWithDefault() throws Exception {
        String[] addresses = DNS.getIPs(DEFAULT);
        assertEquals("Should only return 1 default IP", 1, addresses.length);
        assertEquals(getLocalIPAddr().getHostAddress(), addresses[0].toString());
        String defaultIp = DNS.getDefaultIP(DEFAULT);
        assertEquals(defaultIp, addresses[0].toString());
    }

    Class: org.apache.hadoop.net.TestNetUtils

    EqualityVerifier 
    /** An authority carrying a port but no host passes through canonicalization untouched. */
    @Test
    public void testCanonicalUriWithNoHost() {
        URI canonical = NetUtils.getCanonicalUri(URI.create("scheme://:123/path"), 2);
        assertEquals("scheme://:123/path", canonical.toString());
    }

    EqualityVerifier 
    /** URIs without an authority component are left unchanged by canonicalization. */
    @Test
    public void testCanonicalUriWithNoAuthority() {
        assertEquals("scheme:/",
            NetUtils.getCanonicalUri(URI.create("scheme:/"), 2).toString());
        assertEquals("scheme:/path",
            NetUtils.getCanonicalUri(URI.create("scheme:/path"), 2).toString());
        assertEquals("scheme:///",
            NetUtils.getCanonicalUri(URI.create("scheme:///"), 2).toString());
        assertEquals("scheme:///path",
            NetUtils.getCanonicalUri(URI.create("scheme:///path"), 2).toString());
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * NetUtils.createSocketAddr must parse "host:port", fall back to the
     * supplied default port when the target has none, and report the
     * offending configuration key ("myconfig") when the port is unparsable.
     */
    @Test
    public void testCreateSocketAddress() throws Throwable {
        InetSocketAddress address =
            NetUtils.createSocketAddr("127.0.0.1:12345", 1000, "myconfig");
        assertEquals("127.0.0.1", address.getAddress().getHostAddress());
        assertEquals(12345, address.getPort());

        // No explicit port: the default (1000) applies.
        address = NetUtils.createSocketAddr("127.0.0.1", 1000, "myconfig");
        assertEquals("127.0.0.1", address.getAddress().getHostAddress());
        assertEquals(1000, address.getPort());

        try {
            address = NetUtils.createSocketAddr("127.0.0.1:blahblah", 1000, "myconfig");
            fail("Should have failed to parse bad port");
        } catch (IllegalArgumentException iae) {
            assertInException(iae, "myconfig");
        }
    }

    APIUtilityVerifier EqualityVerifier 
    @Test public void testGetConnectAddress() throws IOException { NetUtils.addStaticResolution("host","127.0.0.1"); InetSocketAddress addr=NetUtils.createSocketAddrForHost("host",1); InetSocketAddress connectAddr=NetUtils.getConnectAddress(addr); assertEquals(addr.getHostName(),connectAddr.getHostName()); addr=new InetSocketAddress(1); connectAddr=NetUtils.getConnectAddress(addr); assertEquals(InetAddress.getLocalHost().getHostName(),connectAddr.getHostName()); }

    EqualityVerifier 
    @Test public void testCanonicalUriWithDefaultPort(){ URI uri; uri=NetUtils.getCanonicalUri(URI.create("scheme://host"),123); assertEquals("scheme://host.a.b:123",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("scheme://host/"),123); assertEquals("scheme://host.a.b:123/",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("scheme://host/path"),123); assertEquals("scheme://host.a.b:123/path",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("scheme://host/path?q#frag"),123); assertEquals("scheme://host.a.b:123/path?q#frag",uri.toString()); }

    EqualityVerifier 
    @Test public void testCanonicalUriWithNoPortNoDefaultPort(){ URI uri=NetUtils.getCanonicalUri(URI.create("scheme://host/path"),-1); assertEquals("scheme://host.a.b/path",uri.toString()); }

    EqualityVerifier 
    @Test public void testCanonicalUriWithPath(){ URI uri; uri=NetUtils.getCanonicalUri(URI.create("path"),2); assertEquals("path",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("/path"),2); assertEquals("/path",uri.toString()); }

    EqualityVerifier 
    @Test public void testCanonicalUriWithPort(){ URI uri; uri=NetUtils.getCanonicalUri(URI.create("scheme://host:123"),456); assertEquals("scheme://host.a.b:123",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("scheme://host:123/"),456); assertEquals("scheme://host.a.b:123/",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("scheme://host:123/path"),456); assertEquals("scheme://host.a.b:123/path",uri.toString()); uri=NetUtils.getCanonicalUri(URI.create("scheme://host:123/path?q#frag"),456); assertEquals("scheme://host.a.b:123/path?q#frag",uri.toString()); }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * Test for {@link NetUtils#normalizeHostNames} */ @Test public void testNormalizeHostName(){ List hosts=Arrays.asList(new String[]{"127.0.0.1","localhost","1.kanyezone.appspot.com","UnknownHost123"}); List normalizedHosts=NetUtils.normalizeHostNames(hosts); assertEquals(normalizedHosts.get(0),hosts.get(0)); assertFalse(normalizedHosts.get(1).equals(hosts.get(1))); assertEquals(normalizedHosts.get(1),hosts.get(0)); assertFalse(normalizedHosts.get(2).equals(hosts.get(2))); assertEquals(normalizedHosts.get(3),hosts.get(3)); }

    Class: org.apache.hadoop.net.TestNetworkTopology

    EqualityVerifier 
    @Test public void testNumOfChildren() throws Exception { assertEquals(cluster.getNumOfLeaves(),dataNodes.length); }

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testRemove() throws Exception { for (int i=0; i < dataNodes.length; i++) { cluster.remove(dataNodes[i]); } for (int i=0; i < dataNodes.length; i++) { assertFalse(cluster.contains(dataNodes[i])); } assertEquals(0,cluster.getNumOfLeaves()); for (int i=0; i < dataNodes.length; i++) { cluster.add(dataNodes[i]); } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=180000) public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception { Configuration conf=new HdfsConfiguration(); MiniDFSCluster cluster=null; try { String racks[]={"/a/b","/c"}; String hosts[]={"foo1.example.com","foo2.example.com"}; cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build(); cluster.waitActive(); NamenodeProtocols nn=cluster.getNameNodeRpc(); Assert.assertNotNull(nn); DatanodeInfo[] info; while (true) { info=nn.getDatanodeReport(DatanodeReportType.LIVE); Assert.assertFalse(info.length == 2); if (info.length == 1) { break; } Thread.sleep(1000); } int validIdx=info[0].getHostName().equals(hosts[0]) ? 0 : 1; int invalidIdx=validIdx == 1 ? 0 : 1; StaticMapping.addNodeToRack(hosts[invalidIdx],racks[validIdx]); LOG.info("datanode " + validIdx + " came up with network location "+ info[0].getNetworkLocation()); cluster.restartDataNode(invalidIdx); Thread.sleep(5000); while (true) { info=nn.getDatanodeReport(DatanodeReportType.LIVE); if (info.length == 2) { break; } if (info.length == 0) { LOG.info("got no valid DNs"); } else if (info.length == 1) { LOG.info("got one valid DN: " + info[0].getHostName() + " (at "+ info[0].getNetworkLocation()+ ")"); } Thread.sleep(1000); } Assert.assertEquals(info[0].getNetworkLocation(),info[1].getNetworkLocation()); } finally { if (cluster != null) { cluster.shutdown(); } } }

    APIUtilityVerifier IterativeVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * This test checks that chooseRandom works for an excluded rack. */ @Test public void testChooseRandomExcludedRack(){ Map frequency=pickNodesAtRandom(100,"~" + "/d2"); for (int j=0; j < dataNodes.length; j++) { int freq=frequency.get(dataNodes[j]); if (dataNodes[j].getNetworkLocation().startsWith("/d2")) { assertEquals(0,freq); } else { assertTrue(freq > 0); } } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testRacks() throws Exception { assertEquals(cluster.getNumOfRacks(),6); assertTrue(cluster.isOnSameRack(dataNodes[0],dataNodes[1])); assertFalse(cluster.isOnSameRack(dataNodes[1],dataNodes[2])); assertTrue(cluster.isOnSameRack(dataNodes[2],dataNodes[3])); assertTrue(cluster.isOnSameRack(dataNodes[3],dataNodes[4])); assertFalse(cluster.isOnSameRack(dataNodes[4],dataNodes[5])); assertTrue(cluster.isOnSameRack(dataNodes[5],dataNodes[6])); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testGetDistance() throws Exception { assertEquals(cluster.getDistance(dataNodes[0],dataNodes[0]),0); assertEquals(cluster.getDistance(dataNodes[0],dataNodes[1]),2); assertEquals(cluster.getDistance(dataNodes[0],dataNodes[3]),4); assertEquals(cluster.getDistance(dataNodes[0],dataNodes[6]),6); }

    Class: org.apache.hadoop.net.TestNetworkTopologyWithNodeGroup

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testNodeGroups() throws Exception { assertEquals(3,cluster.getNumOfRacks()); assertTrue(cluster.isOnSameNodeGroup(dataNodes[0],dataNodes[1])); assertFalse(cluster.isOnSameNodeGroup(dataNodes[1],dataNodes[2])); assertFalse(cluster.isOnSameNodeGroup(dataNodes[2],dataNodes[3])); assertTrue(cluster.isOnSameNodeGroup(dataNodes[3],dataNodes[4])); assertFalse(cluster.isOnSameNodeGroup(dataNodes[4],dataNodes[5])); assertFalse(cluster.isOnSameNodeGroup(dataNodes[5],dataNodes[6])); assertFalse(cluster.isOnSameNodeGroup(dataNodes[6],dataNodes[7])); }

    EqualityVerifier 
    @Test public void testNumOfRacks() throws Exception { assertEquals(3,cluster.getNumOfRacks()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testGetDistance() throws Exception { assertEquals(0,cluster.getDistance(dataNodes[0],dataNodes[0])); assertEquals(2,cluster.getDistance(dataNodes[0],dataNodes[1])); assertEquals(4,cluster.getDistance(dataNodes[0],dataNodes[2])); assertEquals(6,cluster.getDistance(dataNodes[0],dataNodes[3])); assertEquals(8,cluster.getDistance(dataNodes[0],dataNodes[6])); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testRacks() throws Exception { assertEquals(3,cluster.getNumOfRacks()); assertTrue(cluster.isOnSameRack(dataNodes[0],dataNodes[1])); assertTrue(cluster.isOnSameRack(dataNodes[1],dataNodes[2])); assertFalse(cluster.isOnSameRack(dataNodes[2],dataNodes[3])); assertTrue(cluster.isOnSameRack(dataNodes[3],dataNodes[4])); assertTrue(cluster.isOnSameRack(dataNodes[4],dataNodes[5])); assertFalse(cluster.isOnSameRack(dataNodes[5],dataNodes[6])); assertTrue(cluster.isOnSameRack(dataNodes[6],dataNodes[7])); }

    EqualityVerifier 
    @Test public void testNumOfChildren() throws Exception { assertEquals(dataNodes.length,cluster.getNumOfLeaves()); }

    Class: org.apache.hadoop.net.TestSocketIOWithTimeout

    APIUtilityVerifier BranchVerifier UtilityVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    @Test public void testSocketIOWithTimeout() throws Exception { Pipe pipe=Pipe.open(); Pipe.SourceChannel source=pipe.source(); Pipe.SinkChannel sink=pipe.sink(); try { final InputStream in=new SocketInputStream(source,TIMEOUT); OutputStream out=new SocketOutputStream(sink,TIMEOUT); byte[] writeBytes=TEST_STRING.getBytes(); byte[] readBytes=new byte[writeBytes.length]; byte byteWithHighBit=(byte)0x80; out.write(writeBytes); out.write(byteWithHighBit); doIO(null,out,TIMEOUT); in.read(readBytes); assertTrue(Arrays.equals(writeBytes,readBytes)); assertEquals(byteWithHighBit & 0xff,in.read()); doIO(in,null,TIMEOUT); ((SocketInputStream)in).setTimeout(TIMEOUT * 2); doIO(in,null,TIMEOUT * 2); ((SocketInputStream)in).setTimeout(0); TestingThread thread=new TestingThread(ctx){ @Override public void doWork() throws Exception { try { in.read(); fail("Did not fail with interrupt"); } catch ( InterruptedIOException ste) { LOG.info("Got expection while reading as expected : " + ste.getMessage()); } } } ; ctx.addThread(thread); ctx.startThreads(); Thread.sleep(1000); thread.interrupt(); ctx.stop(); assertTrue(source.isOpen()); assertTrue(sink.isOpen()); if (!Shell.WINDOWS && !Shell.PPC_64) { try { out.write(1); fail("Did not throw"); } catch ( IOException ioe) { GenericTestUtils.assertExceptionContains("stream is closed",ioe); } } out.close(); assertFalse(sink.isOpen()); assertEquals(-1,in.read()); in.close(); assertFalse(source.isOpen()); } finally { if (source != null) { source.close(); } if (sink != null) { sink.close(); } } }

    Class: org.apache.hadoop.net.TestStaticMapping

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** * Verify that a configuration string builds a topology */ @Test public void testReadNodesFromConfig() throws Throwable { StaticMapping mapping=newInstance(); Configuration conf=new Configuration(); conf.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING,"n1=/r1,n2=/r2"); mapping.setConf(conf); assertSingleSwitch(mapping); List l1=new ArrayList(3); l1.add("n1"); l1.add("unknown"); l1.add("n2"); List resolved=mapping.resolve(l1); assertEquals(3,resolved.size()); assertEquals("/r1",resolved.get(0)); assertEquals(NetworkTopology.DEFAULT_RACK,resolved.get(1)); assertEquals("/r2",resolved.get(2)); Map switchMap=mapping.getSwitchMap(); String topology=mapping.dumpTopology(); LOG.info(topology); assertEquals(topology,2,switchMap.size()); assertEquals(topology,"/r1",switchMap.get("n1")); assertNull(topology,switchMap.get("unknown")); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testAddResolveNodes() throws Throwable { StaticMapping mapping=newInstance(); StaticMapping.addNodeToRack("n1","/r1"); List queryList=createQueryList(); List resolved=mapping.resolve(queryList); assertEquals(2,resolved.size()); assertEquals("/r1",resolved.get(0)); assertEquals(NetworkTopology.DEFAULT_RACK,resolved.get(1)); Map switchMap=mapping.getSwitchMap(); String topology=mapping.dumpTopology(); LOG.info(topology); assertEquals(topology,1,switchMap.size()); assertEquals(topology,"/r1",switchMap.get("n1")); }

    Class: org.apache.hadoop.net.TestTableMapping

    InternalCallVerifier EqualityVerifier 
    @Test public void testFileDoesNotExist(){ TableMapping mapping=new TableMapping(); Configuration conf=new Configuration(); conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,"/this/file/does/not/exist"); mapping.setConf(conf); List names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); List result=mapping.resolve(names); assertEquals(names.size(),result.size()); assertEquals(result.get(0),NetworkTopology.DEFAULT_RACK); assertEquals(result.get(1),NetworkTopology.DEFAULT_RACK); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testResolve() throws IOException { File mapFile=File.createTempFile(getClass().getSimpleName() + ".testResolve",".txt"); Files.write("a.b.c /rack1\n" + "1.2.3.4\t/rack2\n",mapFile,Charsets.UTF_8); mapFile.deleteOnExit(); TableMapping mapping=new TableMapping(); Configuration conf=new Configuration(); conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,mapFile.getCanonicalPath()); mapping.setConf(conf); List names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); List result=mapping.resolve(names); assertEquals(names.size(),result.size()); assertEquals("/rack1",result.get(0)); assertEquals("/rack2",result.get(1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testTableCaching() throws IOException { File mapFile=File.createTempFile(getClass().getSimpleName() + ".testTableCaching",".txt"); Files.write("a.b.c /rack1\n" + "1.2.3.4\t/rack2\n",mapFile,Charsets.UTF_8); mapFile.deleteOnExit(); TableMapping mapping=new TableMapping(); Configuration conf=new Configuration(); conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,mapFile.getCanonicalPath()); mapping.setConf(conf); List names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); List result1=mapping.resolve(names); assertEquals(names.size(),result1.size()); assertEquals("/rack1",result1.get(0)); assertEquals("/rack2",result1.get(1)); conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,"some bad value for a file"); List result2=mapping.resolve(names); assertEquals(result1,result2); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testClearingCachedMappings() throws IOException { File mapFile=File.createTempFile(getClass().getSimpleName() + ".testClearingCachedMappings",".txt"); Files.write("a.b.c /rack1\n" + "1.2.3.4\t/rack2\n",mapFile,Charsets.UTF_8); mapFile.deleteOnExit(); TableMapping mapping=new TableMapping(); Configuration conf=new Configuration(); conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,mapFile.getCanonicalPath()); mapping.setConf(conf); List names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); List result=mapping.resolve(names); assertEquals(names.size(),result.size()); assertEquals("/rack1",result.get(0)); assertEquals("/rack2",result.get(1)); Files.write("",mapFile,Charsets.UTF_8); mapping.reloadCachedMappings(); names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); result=mapping.resolve(names); assertEquals(names.size(),result.size()); assertEquals(NetworkTopology.DEFAULT_RACK,result.get(0)); assertEquals(NetworkTopology.DEFAULT_RACK,result.get(1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testNoFile(){ TableMapping mapping=new TableMapping(); Configuration conf=new Configuration(); mapping.setConf(conf); List names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); List result=mapping.resolve(names); assertEquals(names.size(),result.size()); assertEquals(NetworkTopology.DEFAULT_RACK,result.get(0)); assertEquals(NetworkTopology.DEFAULT_RACK,result.get(1)); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=60000) public void testBadFile() throws IOException { File mapFile=File.createTempFile(getClass().getSimpleName() + ".testBadFile",".txt"); Files.write("bad contents",mapFile,Charsets.UTF_8); mapFile.deleteOnExit(); TableMapping mapping=new TableMapping(); Configuration conf=new Configuration(); conf.set(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY,mapFile.getCanonicalPath()); mapping.setConf(conf); List names=new ArrayList(); names.add("a.b.c"); names.add("1.2.3.4"); List result=mapping.resolve(names); assertEquals(names.size(),result.size()); assertEquals(result.get(0),NetworkTopology.DEFAULT_RACK); assertEquals(result.get(1),NetworkTopology.DEFAULT_RACK); }

    Class: org.apache.hadoop.net.unix.TestDomainSocket

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * Test setting some server options. * @throws IOException */ @Test(timeout=180000) public void testServerOptions() throws Exception { final String TEST_PATH=new File(sockDir.getDir(),"test_sock_server_options").getAbsolutePath(); DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH); try { int bufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE); int newBufSize=bufSize / 2; serv.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE,newBufSize); int nextBufSize=serv.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE); Assert.assertEquals(newBufSize,nextBufSize); int newTimeout=1000; serv.setAttribute(DomainSocket.RECEIVE_TIMEOUT,newTimeout); int nextTimeout=serv.getAttribute(DomainSocket.RECEIVE_TIMEOUT); Assert.assertEquals(newTimeout,nextTimeout); try { serv.accept(); Assert.fail("expected the accept() to time out and fail"); } catch ( SocketTimeoutException e) { GenericTestUtils.assertExceptionContains("accept(2) error: ",e); } } finally { serv.close(); Assert.assertFalse(serv.isOpen()); } }

    EqualityVerifier 
    /** * Test DomainSocket path setting and getting. * @throws IOException */ @Test(timeout=180000) public void testSocketPathSetGet() throws IOException { Assert.assertEquals("/var/run/hdfs/sock.100",DomainSocket.getEffectivePath("/var/run/hdfs/sock._PORT",100)); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * Test that we get a read result of -1 on EOF. * @throws IOException */ @Test(timeout=180000) public void testSocketReadEof() throws Exception { final String TEST_PATH=new File(sockDir.getDir(),"testSocketReadEof").getAbsolutePath(); final DomainSocket serv=DomainSocket.bindAndListen(TEST_PATH); ExecutorService exeServ=Executors.newSingleThreadExecutor(); Callable callable=new Callable(){ public Void call(){ DomainSocket conn; try { conn=serv.accept(); } catch ( IOException e) { throw new RuntimeException("unexpected IOException",e); } byte buf[]=new byte[100]; for (int i=0; i < buf.length; i++) { buf[i]=0; } try { Assert.assertEquals(-1,conn.getInputStream().read()); } catch ( IOException e) { throw new RuntimeException("unexpected IOException",e); } return null; } } ; Future future=exeServ.submit(callable); DomainSocket conn=DomainSocket.connect(serv.getPath()); Thread.sleep(50); conn.close(); serv.close(); future.get(2,TimeUnit.MINUTES); }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=180000) public void testShutdown() throws Exception { final AtomicInteger bytesRead=new AtomicInteger(0); final AtomicBoolean failed=new AtomicBoolean(false); final DomainSocket[] socks=DomainSocket.socketpair(); Runnable reader=new Runnable(){ @Override public void run(){ while (true) { try { int ret=socks[1].getInputStream().read(); if (ret == -1) return; bytesRead.addAndGet(1); } catch ( IOException e) { DomainSocket.LOG.error("reader error",e); failed.set(true); return; } } } } ; Thread readerThread=new Thread(reader); readerThread.start(); socks[0].getOutputStream().write(1); socks[0].getOutputStream().write(2); socks[0].getOutputStream().write(3); Assert.assertTrue(readerThread.isAlive()); socks[0].shutdown(); readerThread.join(); Assert.assertFalse(failed.get()); Assert.assertEquals(3,bytesRead.get()); IOUtils.cleanup(null,socks); }

    Class: org.apache.hadoop.nfs.TestNfsExports

    InternalCallVerifier EqualityVerifier 
    @Test public void testMultiMatchers() throws Exception { long shortExpirationPeriod=1 * 1000 * 1000* 1000; NfsExports matcher=new NfsExports(CacheSize,shortExpirationPeriod,"192.168.0.[0-9]+;[a-z]+.b.com rw"); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname2)); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,address1)); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address2,hostname1)); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address2,hostname2)); Thread.sleep(1000); AccessPrivilege ap; long startNanos=System.nanoTime(); do { ap=matcher.getAccessPrivilege(address2,address2); if (ap == AccessPrivilege.NONE) { break; } Thread.sleep(500); } while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000); Assert.assertEquals(AccessPrivilege.NONE,ap); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testExactAddressRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,address1 + " rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertFalse(AccessPrivilege.READ_WRITE == matcher.getAccessPrivilege(address2,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testRegexHostRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"[a-z]+.b.com"); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname2)); }

    EqualityVerifier 
    @Test public void testExactHostRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,hostname1 + " rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); }

    EqualityVerifier 
    @Test public void testWildcardRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"* ro"); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testRegexIPRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.[0-9]+ rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testCidrShortRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/22 rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testCidrLongRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/255.255.252.0 rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testExactAddressRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,address1); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testRegexHostRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"[a-z]+.b.com rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname2)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testRegexIPRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.[0-9]+"); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    EqualityVerifier 
    @Test public void testExactHostRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,hostname1); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); }

    EqualityVerifier 
    @Test public void testWildcardRW(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"* rw"); Assert.assertEquals(AccessPrivilege.READ_WRITE,matcher.getAccessPrivilege(address1,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testCidrLongRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/255.255.252.0"); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testCidrShortRO(){ NfsExports matcher=new NfsExports(CacheSize,ExpirationPeriod,"192.168.0.0/22"); Assert.assertEquals(AccessPrivilege.READ_ONLY,matcher.getAccessPrivilege(address1,hostname1)); Assert.assertEquals(AccessPrivilege.NONE,matcher.getAccessPrivilege(address2,hostname1)); }

    Class: org.apache.hadoop.nfs.TestNfsTime

    InternalCallVerifier EqualityVerifier 
    @Test public void testConstructor(){ NfsTime nfstime=new NfsTime(1001); Assert.assertEquals(1,nfstime.getSeconds()); Assert.assertEquals(1000000,nfstime.getNseconds()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testSerializeDeserialize(){ NfsTime t1=new NfsTime(1001); XDR xdr=new XDR(); t1.serialize(xdr); NfsTime t2=NfsTime.deserialize(xdr.asReadOnlyWrap()); Assert.assertEquals(t1,t2); }

    Class: org.apache.hadoop.nfs.nfs3.TestFileHandle

    InternalCallVerifier EqualityVerifier 
    @Test public void testConstructor(){ FileHandle handle=new FileHandle(1024); XDR xdr=new XDR(); handle.serialize(xdr); Assert.assertEquals(handle.getFileId(),1024); FileHandle handle2=new FileHandle(); handle2.deserialize(xdr.asReadOnlyWrap()); Assert.assertEquals("Failed: Assert 1024 is id ",1024,handle.getFileId()); }

    Class: org.apache.hadoop.nfs.nfs3.TestIdUserGroup

    InternalCallVerifier EqualityVerifier 
    @Test public void testStaticMapping() throws IOException { Map uidStaticMap=new PassThroughMap(); Map gidStaticMap=new PassThroughMap(); uidStaticMap.put(11501,10); gidStaticMap.put(497,200); BiMap uMap=HashBiMap.create(); BiMap gMap=HashBiMap.create(); String GET_ALL_USERS_CMD="echo \"atm:x:1000:1000:Aaron T. Myers,,,:/home/atm:/bin/bash\n" + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\"" + " | cut -d: -f1,3"; String GET_ALL_GROUPS_CMD="echo \"hdfs:*:11501:hrt_hdfs\n" + "mapred:x:497\n" + "mapred2:x:498\""+ " | cut -d: -f1,3"; IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",uidStaticMap); IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",gidStaticMap); assertEquals("hdfs",uMap.get(10)); assertEquals(10,(int)uMap.inverse().get("hdfs")); assertEquals("atm",uMap.get(1000)); assertEquals(1000,(int)uMap.inverse().get("atm")); assertEquals("hdfs",gMap.get(11501)); assertEquals(11501,(int)gMap.inverse().get("hdfs")); assertEquals("mapred",gMap.get(200)); assertEquals(200,(int)gMap.inverse().get("mapred")); assertEquals("mapred2",gMap.get(498)); assertEquals(498,(int)gMap.inverse().get("mapred2")); }

    APIUtilityVerifier EqualityVerifier 
    @Test public void testStaticMapParsing() throws IOException { File tempStaticMapFile=File.createTempFile("nfs-",".map"); final String staticMapFileContents="uid 10 100\n" + "gid 10 200\n" + "uid 11 201 # comment at the end of a line\n"+ "uid 12 301\n"+ "# Comment at the beginning of a line\n"+ " # Comment that starts late in the line\n"+ "uid 10000 10001# line without whitespace before comment\n"+ "uid 13 302\n"+ "gid\t11\t201\n"+ "\n"+ "gid 12 202"; OutputStream out=new FileOutputStream(tempStaticMapFile); out.write(staticMapFileContents.getBytes()); out.close(); StaticMapping parsedMap=IdUserGroup.parseStaticMap(tempStaticMapFile); assertEquals(10,(int)parsedMap.uidMapping.get(100)); assertEquals(11,(int)parsedMap.uidMapping.get(201)); assertEquals(12,(int)parsedMap.uidMapping.get(301)); assertEquals(13,(int)parsedMap.uidMapping.get(302)); assertEquals(10,(int)parsedMap.gidMapping.get(200)); assertEquals(11,(int)parsedMap.gidMapping.get(201)); assertEquals(12,(int)parsedMap.gidMapping.get(202)); assertEquals(10000,(int)parsedMap.uidMapping.get(10001)); assertEquals(1000,(int)parsedMap.uidMapping.get(1000)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testUserUpdateSetting() throws IOException { IdUserGroup iug=new IdUserGroup(new Configuration()); assertEquals(iug.getTimeout(),Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT); Configuration conf=new Configuration(); conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,0); iug=new IdUserGroup(conf); assertEquals(iug.getTimeout(),Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN); conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2); iug=new IdUserGroup(conf); assertEquals(iug.getTimeout(),Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testIdOutOfIntegerRange() throws IOException { String GET_ALL_USERS_CMD="echo \"" + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n" + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"+ "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"+ "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"+ "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"+ "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""+ " | cut -d: -f1,3"; String GET_ALL_GROUPS_CMD="echo \"" + "hdfs:*:11501:hrt_hdfs\n" + "rpcuser:*:29:\n"+ "nfsnobody:*:4294967294:\n"+ "nfsnobody1:*:4294967295:\n"+ "maxint:*:2147483647:\n"+ "minint:*:2147483648:\n"+ "mapred3:x:498\""+ " | cut -d: -f1,3"; BiMap uMap=HashBiMap.create(); BiMap gMap=HashBiMap.create(); IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",EMPTY_PASS_THROUGH_MAP); assertTrue(uMap.size() == 7); assertEquals("nfsnobody",uMap.get(-2)); assertEquals("nfsnobody1",uMap.get(-1)); assertEquals("maxint",uMap.get(2147483647)); assertEquals("minint",uMap.get(-2147483648)); assertEquals("archivebackup",uMap.get(1031)); assertEquals("hdfs",uMap.get(11501)); assertEquals("daemon",uMap.get(2)); IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",EMPTY_PASS_THROUGH_MAP); assertTrue(gMap.size() == 7); assertEquals("hdfs",gMap.get(11501)); assertEquals("rpcuser",gMap.get(29)); assertEquals("nfsnobody",gMap.get(-2)); assertEquals("nfsnobody1",gMap.get(-1)); assertEquals("maxint",gMap.get(2147483647)); assertEquals("minint",gMap.get(-2147483648)); assertEquals("mapred3",gMap.get(498)); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testDuplicates() throws IOException { String GET_ALL_USERS_CMD="echo \"root:x:0:0:root:/root:/bin/bash\n" + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n" + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"+ "bin:x:2:2:bin:/bin:/bin/sh\n"+ "bin:x:1:1:bin:/bin:/sbin/nologin\n"+ "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""+ " | cut -d: -f1,3"; String GET_ALL_GROUPS_CMD="echo \"hdfs:*:11501:hrt_hdfs\n" + "mapred:x:497\n" + "mapred2:x:497\n"+ "mapred:x:498\n"+ "mapred3:x:498\""+ " | cut -d: -f1,3"; BiMap uMap=HashBiMap.create(); BiMap gMap=HashBiMap.create(); IdUserGroup.updateMapInternal(uMap,"user",GET_ALL_USERS_CMD,":",EMPTY_PASS_THROUGH_MAP); assertEquals(5,uMap.size()); assertEquals("root",uMap.get(0)); assertEquals("hdfs",uMap.get(11501)); assertEquals("hdfs2",uMap.get(11502)); assertEquals("bin",uMap.get(2)); assertEquals("daemon",uMap.get(1)); IdUserGroup.updateMapInternal(gMap,"group",GET_ALL_GROUPS_CMD,":",EMPTY_PASS_THROUGH_MAP); assertTrue(gMap.size() == 3); assertEquals("hdfs",gMap.get(11501)); assertEquals("mapred",gMap.get(497)); assertEquals("mapred3",gMap.get(498)); }

    Class: org.apache.hadoop.oncrpc.TestFrameDecoder

    InternalCallVerifier EqualityVerifier 
    /**
     * Server bound to an unprivileged port: the first request (no portmap
     * header) produces resultSize 0; the second request, prefixed with a
     * portmap header, must echo back exactly the request body size.
     */
    @Test
    public void testUnprivilegedPort() {
      final int port = startRpcServer(false);
      final int payloadSize = 2 * 1024 * 1024;

      XDR request = createGetportMount();
      byte[] payload = new byte[payloadSize];
      request.writeFixedOpaque(payload);
      testRequest(request, port);
      assertEquals(0, resultSize);

      // Rebuild the request with a portmap header in front of the payload.
      request = new XDR();
      createPortmapXDRheader(request, 0);
      final int headerLen = request.size();
      payload = new byte[payloadSize];
      request.writeFixedOpaque(payload);
      final int bodyLen = request.size() - headerLen;
      testRequest(request, port);
      assertEquals(bodyLen, resultSize);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Feeds the RpcFrameDecoder a non-final fragment (record-mark high bit
     * clear) followed by a final fragment (high bit set): the first decode
     * must buffer and return null, the second must deliver both 10-byte
     * fragment bodies together (20 readable bytes).
     *
     * Fix: fragment-size checks now use assertEquals so a failure reports the
     * actual size, instead of assertTrue on an == expression.
     */
    @Test
    public void testMultipleFrames() {
      RpcFrameDecoder decoder = new RpcFrameDecoder();
      // Fragment 1: last-fragment bit clear, declared length 10.
      byte[] fragment1 = new byte[4 + 10];
      fragment1[0] = 0;
      fragment1[1] = 0;
      fragment1[2] = 0;
      fragment1[3] = (byte) 10;
      assertFalse(XDR.isLastFragment(fragment1));
      assertEquals(10, XDR.fragmentSize(fragment1));
      ByteBuffer buffer = ByteBuffer.allocate(4 + 10);
      buffer.put(fragment1);
      buffer.flip();
      ChannelBuffer buf = new ByteBufferBackedChannelBuffer(buffer);
      ChannelBuffer channelBuffer = (ChannelBuffer) decoder.decode(
          Mockito.mock(ChannelHandlerContext.class),
          Mockito.mock(Channel.class), buf);
      // Not the last fragment: the decoder must hold the bytes and emit nothing.
      assertTrue(channelBuffer == null);
      // Fragment 2: last-fragment bit set, declared length 10.
      byte[] fragment2 = new byte[4 + 10];
      fragment2[0] = (byte) (1 << 7);
      fragment2[1] = 0;
      fragment2[2] = 0;
      fragment2[3] = (byte) 10;
      assertTrue(XDR.isLastFragment(fragment2));
      assertEquals(10, XDR.fragmentSize(fragment2));
      buffer = ByteBuffer.allocate(4 + 10);
      buffer.put(fragment2);
      buffer.flip();
      buf = new ByteBufferBackedChannelBuffer(buffer);
      channelBuffer = (ChannelBuffer) decoder.decode(
          Mockito.mock(ChannelHandlerContext.class),
          Mockito.mock(Channel.class), buf);
      assertTrue(channelBuffer != null);
      // Both fragment bodies (2 x 10 bytes) are delivered in one buffer.
      assertEquals(20, channelBuffer.readableBytes());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Sends a 2 MB opaque payload after the getport/mount header; the echoed
     * resultSize must equal the payload portion (total minus header bytes).
     */
    @Test
    public void testFrames() {
      final int port = startRpcServer(true);
      final XDR request = createGetportMount();
      final int headerLen = request.size();
      final byte[] payload = new byte[2 * 1024 * 1024];
      request.writeFixedOpaque(payload);
      final int bodyLen = request.size() - headerLen;
      testRequest(request, port);
      assertEquals(bodyLen, resultSize);
    }

    Class: org.apache.hadoop.oncrpc.TestRpcAcceptedReply

    InternalCallVerifier EqualityVerifier 
    /** RpcAcceptedReply getters must echo every constructor argument. */
    @Test
    public void testConstructor() {
      final Verifier none = new VerifierNone();
      final RpcAcceptedReply accepted =
          new RpcAcceptedReply(0, ReplyState.MSG_ACCEPTED, none, AcceptState.SUCCESS);
      assertEquals(0, accepted.getXid());
      assertEquals(RpcMessage.Type.RPC_REPLY, accepted.getMessageType());
      assertEquals(ReplyState.MSG_ACCEPTED, accepted.getState());
      assertEquals(none, accepted.getVerifier());
      assertEquals(AcceptState.SUCCESS, accepted.getAcceptState());
    }

    EqualityVerifier 
    /** AcceptState.fromValue must map wire codes 0..5 in declaration order. */
    @Test
    public void testAcceptState() {
      final AcceptState[] byWireValue = {
          AcceptState.SUCCESS, AcceptState.PROG_UNAVAIL, AcceptState.PROG_MISMATCH,
          AcceptState.PROC_UNAVAIL, AcceptState.GARBAGE_ARGS, AcceptState.SYSTEM_ERR};
      for (int code = 0; code < byWireValue.length; code++) {
        assertEquals(byWireValue[code], AcceptState.fromValue(code));
      }
    }

    Class: org.apache.hadoop.oncrpc.TestRpcCall

    InternalCallVerifier EqualityVerifier 
    /** RpcCall getters must echo every constructor argument. */
    @Test
    public void testConstructor() {
      final Credentials credential = new CredentialsNone();
      final Verifier verifier = new VerifierNone();
      final int rpcVersion = RpcCall.RPC_VERSION;
      final int program = 2;
      final int version = 3;
      final int procedure = 4;
      final RpcCall call = new RpcCall(0, RpcMessage.Type.RPC_CALL, rpcVersion,
          program, version, procedure, credential, verifier);
      assertEquals(0, call.getXid());
      assertEquals(RpcMessage.Type.RPC_CALL, call.getMessageType());
      assertEquals(rpcVersion, call.getRpcVersion());
      assertEquals(program, call.getProgram());
      assertEquals(version, call.getVersion());
      assertEquals(procedure, call.getProcedure());
      assertEquals(credential, call.getCredential());
      assertEquals(verifier, call.getVerifier());
    }

    Class: org.apache.hadoop.oncrpc.TestRpcCallCache

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the bounded (capacity 10) RpcCallCache with 20 distinct client
     * addresses: after each insert, the cache size grows to at most 10; the
     * iterator is expected to yield the surviving entries starting at
     * startEntry (the oldest non-evicted client), and re-adding a cached
     * client returns its existing in-progress, not-completed entry.
     * NOTE(review): the iteration-order assertion implies eviction of the
     * least-recently-added entry — confirm against RpcCallCache's map type.
     */
    @Test public void testCacheFunctionality() throws UnknownHostException { RpcCallCache cache=new RpcCallCache("Test",10); int size=0; for (int clientId=0; clientId < 20; clientId++) { InetAddress clientIp=InetAddress.getByName("1.1.1." + clientId); System.out.println("Adding " + clientIp); cache.checkOrAddToCache(clientIp,0); size=Math.min(++size,10); System.out.println("Cache size " + cache.size()); assertEquals(size,cache.size()); int startEntry=Math.max(clientId - 10 + 1,0); Iterator> iterator=cache.iterator(); for (int i=0; i < size; i++) { ClientRequest key=iterator.next().getKey(); System.out.println("Entry " + key.getClientId()); assertEquals(InetAddress.getByName("1.1.1." + (startEntry + i)),key.getClientId()); } for (int i=0; i < size; i++) { CacheEntry e=cache.checkOrAddToCache(InetAddress.getByName("1.1.1." + (startEntry + i)),0); assertNotNull(e); assertTrue(e.isInProgress()); assertFalse(e.isCompleted()); } } }

    EqualityVerifier 
    /** The cache must report the program name it was constructed with. */
    @Test
    public void testRpcCallCacheConstructor() {
      final RpcCallCache callCache = new RpcCallCache("test", 100);
      assertEquals("test", callCache.getProgram());
    }

    Class: org.apache.hadoop.oncrpc.TestRpcDeniedReply

    InternalCallVerifier EqualityVerifier 
    /** RpcDeniedReply must preserve xid, message type, state and reject state. */
    @Test
    public void testConstructor() {
      final RpcDeniedReply denied = new RpcDeniedReply(0, ReplyState.MSG_ACCEPTED,
          RejectState.AUTH_ERROR, new VerifierNone());
      Assert.assertEquals(0, denied.getXid());
      Assert.assertEquals(RpcMessage.Type.RPC_REPLY, denied.getMessageType());
      Assert.assertEquals(ReplyState.MSG_ACCEPTED, denied.getState());
      Assert.assertEquals(RejectState.AUTH_ERROR, denied.getRejectState());
    }

    EqualityVerifier 
    /** Wire values 0 and 1 map to RPC_MISMATCH and AUTH_ERROR respectively. */
    @Test
    public void testRejectStateFromValue() {
      final RejectState[] byWireValue = {RejectState.RPC_MISMATCH, RejectState.AUTH_ERROR};
      for (int code = 0; code < byWireValue.length; code++) {
        Assert.assertEquals(byWireValue[code], RejectState.fromValue(code));
      }
    }

    Class: org.apache.hadoop.oncrpc.TestRpcMessage

    InternalCallVerifier EqualityVerifier 
    /** getRpcMessage must round-trip the xid and message type it is given. */
    @Test
    public void testRpcMessage() {
      final RpcMessage message = getRpcMessage(0, RpcMessage.Type.RPC_CALL);
      Assert.assertEquals(0, message.getXid());
      Assert.assertEquals(RpcMessage.Type.RPC_CALL, message.getMessageType());
    }

    Class: org.apache.hadoop.oncrpc.TestRpcReply

    InternalCallVerifier EqualityVerifier 
    /** A concrete RpcReply subclass must expose xid, reply type and reply state. */
    @Test
    public void testRpcReply() {
      // Minimal anonymous subclass: write() is irrelevant to the getters under test.
      final RpcReply rpcReply = new RpcReply(0, ReplyState.MSG_ACCEPTED, new VerifierNone()) {
        @Override
        public XDR write(XDR xdr) {
          return null;
        }
      };
      Assert.assertEquals(0, rpcReply.getXid());
      Assert.assertEquals(RpcMessage.Type.RPC_REPLY, rpcReply.getMessageType());
      Assert.assertEquals(ReplyState.MSG_ACCEPTED, rpcReply.getState());
    }

    EqualityVerifier 
    /** Wire values 0 and 1 map to MSG_ACCEPTED and MSG_DENIED respectively. */
    @Test
    public void testReplyStateFromValue() {
      final ReplyState[] byWireValue = {ReplyState.MSG_ACCEPTED, ReplyState.MSG_DENIED};
      for (int code = 0; code < byWireValue.length; code++) {
        Assert.assertEquals(byWireValue[code], ReplyState.fromValue(code));
      }
    }

    Class: org.apache.hadoop.oncrpc.security.TestCredentialsSys

    InternalCallVerifier EqualityVerifier 
    /** CredentialsSys (uid 0, gid 1) must survive an XDR write/read round trip. */
    @Test
    public void testReadWrite() {
      final CredentialsSys written = new CredentialsSys();
      written.setUID(0);
      written.setGID(1);
      final XDR xdr = new XDR();
      written.write(xdr);
      // Deserialize from a read-only view over the same bytes.
      final CredentialsSys read = new CredentialsSys();
      read.read(xdr.asReadOnlyWrap());
      assertEquals(0, read.getUID());
      assertEquals(1, read.getGID());
    }

    Class: org.apache.hadoop.oncrpc.security.TestRpcAuthInfo

    EqualityVerifier 
    /** AuthFlavor.fromValue must map the standard flavor numbers (note: 4 and 5 unused here). */
    @Test
    public void testAuthFlavor() {
      final int[] codes = {0, 1, 2, 3, 6};
      final AuthFlavor[] flavors = {AuthFlavor.AUTH_NONE, AuthFlavor.AUTH_SYS,
          AuthFlavor.AUTH_SHORT, AuthFlavor.AUTH_DH, AuthFlavor.RPCSEC_GSS};
      for (int i = 0; i < codes.length; i++) {
        assertEquals(flavors[i], AuthFlavor.fromValue(codes[i]));
      }
    }

    EqualityVerifier ExceptionVerifier HybridVerifier 
    /** An unmapped flavor number (4) must raise IllegalArgumentException. */
    @Test(expected = IllegalArgumentException.class)
    public void testInvalidAuthFlavor() {
      // fromValue(4) is expected to throw before the assertion can matter.
      assertEquals(AuthFlavor.AUTH_NONE, AuthFlavor.fromValue(4));
    }

    Class: org.apache.hadoop.security.TestCredentials

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Round-trips a Credentials object (2 tokens + 10 HMAC secret keys)
     * through write()/readFields() over a local file and verifies that all
     * tokens and keys survive intact.
     *
     * Fixes: the token-count assertEquals had expected/actual swapped
     * (JUnit's contract is assertEquals(message, expected, actual)); the
     * file streams were leaked if an assertion or I/O error fired before
     * close(); and the temp file leaked on failure. Streams now use
     * try-with-resources and cleanup runs in a finally block.
     */
    @SuppressWarnings("unchecked")
    @Test
    public void testReadWriteStorage() throws IOException, NoSuchAlgorithmException {
      Credentials ts = new Credentials();
      Token token1 = new Token();
      Token token2 = new Token();
      Text service1 = new Text("service1");
      Text service2 = new Text("service2");
      Collection services = new ArrayList();
      services.add(service1);
      services.add(service2);
      token1.setService(service1);
      token2.setService(service2);
      ts.addToken(new Text("sometoken1"), token1);
      ts.addToken(new Text("sometoken2"), token2);
      // Generate 10 secret keys, remembering them locally for later comparison.
      final KeyGenerator kg = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
      String alias = "alias";
      Map<Text, byte[]> m = new HashMap<Text, byte[]>(10);
      for (int i = 0; i < 10; i++) {
        Key key = kg.generateKey();
        m.put(new Text(alias + i), key.getEncoded());
        ts.addSecretKey(new Text(alias + i), key.getEncoded());
      }
      File tmpFileName = new File(tmpDir, "tokenStorageTest");
      try {
        try (DataOutputStream dos =
            new DataOutputStream(new FileOutputStream(tmpFileName))) {
          ts.write(dos);
        }
        ts = new Credentials();
        try (DataInputStream dis =
            new DataInputStream(new FileInputStream(tmpFileName))) {
          ts.readFields(dis);
        }
        Collection<Token<? extends TokenIdentifier>> list = ts.getAllTokens();
        assertEquals("getAllTokens should return collection of size 2",
            2, list.size());
        boolean foundFirst = false;
        boolean foundSecond = false;
        for (Token<? extends TokenIdentifier> token : list) {
          if (token.getService().equals(service1)) {
            foundFirst = true;
          }
          if (token.getService().equals(service2)) {
            foundSecond = true;
          }
        }
        assertTrue("Tokens for services service1 and service2 must be present",
            foundFirst && foundSecond);
        int mapLen = m.size();
        assertEquals("wrong number of keys in the Storage",
            mapLen, ts.numberOfSecretKeys());
        // Every generated key must come back byte-for-byte identical.
        for (Text a : m.keySet()) {
          byte[] kTS = ts.getSecretKey(a);
          byte[] kLocal = m.get(a);
          assertTrue("keys don't match for " + a,
              WritableComparator.compareBytes(kTS, 0, kTS.length,
                  kLocal, 0, kLocal.length) == 0);
        }
      } finally {
        tmpFileName.delete();
      }
    }

    IterativeVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /** Tokens added to a UGI via Credentials must come back as the same instances. */
    @Test
    public void testAddTokensToUGI() {
      final UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someone");
      final Credentials added = new Credentials();
      for (int idx = 0; idx < service.length; idx++) {
        added.addToken(service[idx], token[idx]);
      }
      ugi.addCredentials(added);
      final Credentials fetched = ugi.getCredentials();
      for (int idx = 0; idx < service.length; idx++) {
        // Identity, not just equality: the UGI must not copy the tokens.
        assertSame(token[idx], fetched.getToken(service[idx]));
      }
      assertEquals(service.length, fetched.numberOfTokens());
    }

    InternalCallVerifier EqualityVerifier 
    /** addAll must overwrite entries for duplicate keys with the incoming values. */
    @Test
    public void addAll() {
      final Credentials base = new Credentials();
      base.addToken(service[0], token[0]);
      base.addToken(service[1], token[1]);
      base.addSecretKey(secret[0], secret[0].getBytes());
      base.addSecretKey(secret[1], secret[1].getBytes());
      final Credentials incoming = new Credentials();
      incoming.addToken(service[0], token[3]);
      incoming.addToken(service[2], token[2]);
      incoming.addSecretKey(secret[0], secret[3].getBytes());
      incoming.addSecretKey(secret[2], secret[2].getBytes());
      base.addAll(incoming);
      assertEquals(3, base.numberOfTokens());
      assertEquals(3, base.numberOfSecretKeys());
      // Duplicated keys take the incoming value; the rest are preserved/added.
      assertEquals(token[3], base.getToken(service[0]));
      assertEquals(secret[3], new Text(base.getSecretKey(secret[0])));
      assertEquals(token[1], base.getToken(service[1]));
      assertEquals(secret[1], new Text(base.getSecretKey(secret[1])));
      assertEquals(token[2], base.getToken(service[2]));
      assertEquals(secret[2], new Text(base.getSecretKey(secret[2])));
    }

    InternalCallVerifier EqualityVerifier 
    /** mergeAll must keep the existing entry on duplicate keys (unlike addAll). */
    @Test
    public void mergeAll() {
      final Credentials base = new Credentials();
      base.addToken(service[0], token[0]);
      base.addToken(service[1], token[1]);
      base.addSecretKey(secret[0], secret[0].getBytes());
      base.addSecretKey(secret[1], secret[1].getBytes());
      final Credentials incoming = new Credentials();
      incoming.addToken(service[0], token[3]);
      incoming.addToken(service[2], token[2]);
      incoming.addSecretKey(secret[0], secret[3].getBytes());
      incoming.addSecretKey(secret[2], secret[2].getBytes());
      base.mergeAll(incoming);
      assertEquals(3, base.numberOfTokens());
      assertEquals(3, base.numberOfSecretKeys());
      // Duplicated keys keep the original value; new keys are added.
      assertEquals(token[0], base.getToken(service[0]));
      assertEquals(secret[0], new Text(base.getSecretKey(secret[0])));
      assertEquals(token[1], base.getToken(service[1]));
      assertEquals(secret[1], new Text(base.getSecretKey(secret[1])));
      assertEquals(token[2], base.getToken(service[2]));
      assertEquals(secret[2], new Text(base.getSecretKey(secret[2])));
    }

    Class: org.apache.hadoop.security.TestDoAsEffectiveUser

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Proxy user carrying a delegation token: a proxy UGI for PROXY_USER_NAME
     * is given a token issued for the real user, and the RPC server must see
     * the caller as "REAL_USER_NAME (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)".
     * The server is stopped in the finally block inside doAs, so it shuts down
     * even when the RPC call throws.
     */
    @Test public void testProxyWithToken() throws Exception { final Configuration conf=new Configuration(masterConf); TestTokenSecretManager sm=new TestTokenSecretManager(); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf); UserGroupInformation.setConfiguration(conf); final Server server=new RPC.Builder(conf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); server.start(); final UserGroupInformation current=UserGroupInformation.createRemoteUser(REAL_USER_NAME); final InetSocketAddress addr=NetUtils.getConnectAddress(server); TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()),new Text("SomeSuperUser")); Token token=new Token(tokenId,sm); SecurityUtil.setTokenService(token,addr); UserGroupInformation proxyUserUgi=UserGroupInformation.createProxyUserForTesting(PROXY_USER_NAME,current,GROUP_NAMES); proxyUserUgi.addToken(token); refreshConf(conf); String retVal=proxyUserUgi.doAs(new PrivilegedExceptionAction(){ @Override public String run() throws Exception { try { proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,conf); String ret=proxy.aMethod(); return ret; } catch ( Exception e) { e.printStackTrace(); throw e; } finally { server.stop(); if (proxy != null) { RPC.stopProxy(proxy); } } } } ); Assert.assertEquals(REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)",retVal); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A user holding a token naming "SomeSuperUser" as the issuer: after doAs,
     * the RPC server must report the caller as
     * "REAL_USER_NAME (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)".
     * As in testProxyWithToken, the server is stopped in the inner finally so
     * shutdown happens even on RPC failure.
     */
    @Test public void testTokenBySuperUser() throws Exception { TestTokenSecretManager sm=new TestTokenSecretManager(); final Configuration newConf=new Configuration(masterConf); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,newConf); UserGroupInformation.setConfiguration(newConf); final Server server=new RPC.Builder(newConf).setProtocol(TestProtocol.class).setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true).setSecretManager(sm).build(); server.start(); final UserGroupInformation current=UserGroupInformation.createUserForTesting(REAL_USER_NAME,GROUP_NAMES); refreshConf(newConf); final InetSocketAddress addr=NetUtils.getConnectAddress(server); TestTokenIdentifier tokenId=new TestTokenIdentifier(new Text(current.getUserName()),new Text("SomeSuperUser")); Token token=new Token(tokenId,sm); SecurityUtil.setTokenService(token,addr); current.addToken(token); String retVal=current.doAs(new PrivilegedExceptionAction(){ @Override public String run() throws Exception { try { proxy=RPC.getProxy(TestProtocol.class,TestProtocol.versionID,addr,newConf); String ret=proxy.aMethod(); return ret; } catch ( Exception e) { e.printStackTrace(); throw e; } finally { server.stop(); if (proxy != null) { RPC.stopProxy(proxy); } } } } ); String expected=REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)"; Assert.assertEquals(retVal + "!=" + expected,expected,retVal); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Test method for
     * {@link org.apache.hadoop.security.UserGroupInformation#createProxyUser(java.lang.String,org.apache.hadoop.security.UserGroupInformation)}.
     * Inside doAs, the current UGI must render as "proxy via real user".
     */
    @Test
    public void testCreateProxyUser() throws Exception {
      final UserGroupInformation realUserUgi =
          UserGroupInformation.createRemoteUser(REAL_USER_NAME);
      final UserGroupInformation proxyUserUgi =
          UserGroupInformation.createProxyUser(PROXY_USER_NAME, realUserUgi);
      final UserGroupInformation curUGI =
          proxyUserUgi.doAs(new PrivilegedExceptionAction<UserGroupInformation>() {
            @Override
            public UserGroupInformation run() throws IOException {
              return UserGroupInformation.getCurrentUser();
            }
          });
      Assert.assertEquals(
          PROXY_USER_NAME + " (auth:PROXY) via " + REAL_USER_NAME + " (auth:SIMPLE)",
          curUGI.toString());
    }

    Class: org.apache.hadoop.security.TestGroupsCaching

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Negative group-lookup caching: after a blacklisted user fails a lookup,
     * subsequent lookups must keep failing from the negative cache — even once
     * the underlying mapping would succeed again — until the configured
     * negative-cache TTL (2s here, driven by a FakeTimer) expires, after which
     * the lookup succeeds with the real groups.
     */
    @Test public void testNegativeGroupCaching() throws Exception { final String user="negcache"; final String failMessage="Did not throw IOException: "; conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,2); FakeTimer timer=new FakeTimer(); Groups groups=new Groups(conf,timer); groups.cacheGroupsAdd(Arrays.asList(myGroups)); groups.refresh(); FakeGroupMapping.addToBlackList(user); try { groups.getGroups(user); fail(failMessage + "Failed to obtain groups from FakeGroupMapping."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("No groups found for user",e); } try { groups.getGroups(user); fail(failMessage + "The user is in the negative cache."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("No groups found for user",e); } FakeGroupMapping.clearBlackList(); try { groups.getGroups(user); fail(failMessage + "The user is still in the negative cache, even " + "FakeGroupMapping has resumed."); } catch ( IOException e) { GenericTestUtils.assertExceptionContains("No groups found for user",e); } timer.advance(4 * 1000); assertEquals(Arrays.asList(myGroups),groups.getGroups(user)); }

    Class: org.apache.hadoop.security.TestLdapGroupsMapping

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * LdapGroupsMapping.getPassword via the CredentialProvider API: stores the
     * bind and keystore passwords in a fresh JavaKeyStoreProvider-backed jks
     * file, then verifies getPassword resolves both aliases from the provider
     * and falls back to the supplied default ("") for an unknown alias.
     * The jks file is deleted up front so each run starts from an empty store.
     */
    @Test public void testConfGetPassword() throws Exception { File testDir=new File(System.getProperty("test.build.data","target/test-dir")); Configuration conf=new Configuration(); final String ourUrl=JavaKeyStoreProvider.SCHEME_NAME + "://file/" + testDir+ "/test.jks"; File file=new File(testDir,"test.jks"); file.delete(); conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,ourUrl); CredentialProvider provider=CredentialProviderFactory.getProviders(conf).get(0); char[] bindpass={'b','i','n','d','p','a','s','s'}; char[] storepass={'s','t','o','r','e','p','a','s','s'}; assertEquals(null,provider.getCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY)); assertEquals(null,provider.getCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY)); try { provider.createCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY,bindpass); provider.createCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,storepass); provider.flush(); } catch ( Exception e) { e.printStackTrace(); throw e; } assertArrayEquals(bindpass,provider.getCredentialEntry(LdapGroupsMapping.BIND_PASSWORD_KEY).getCredential()); assertArrayEquals(storepass,provider.getCredentialEntry(LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY).getCredential()); LdapGroupsMapping mapping=new LdapGroupsMapping(); Assert.assertEquals("bindpass",mapping.getPassword(conf,LdapGroupsMapping.BIND_PASSWORD_KEY,"")); Assert.assertEquals("storepass",mapping.getPassword(conf,LdapGroupsMapping.LDAP_KEYSTORE_PASSWORD_KEY,"")); Assert.assertEquals("",mapping.getPassword(conf,"invalid-alias","")); }

    EqualityVerifier 
    /**
     * extractPassword must read the password file's contents ("hadoop") from
     * the path it is given.
     *
     * Fix: the Writer was leaked if write() threw before close(); it now uses
     * try-with-resources.
     */
    @Test
    public void testExtractPassword() throws IOException {
      File testDir = new File(System.getProperty("test.build.data", "target/test-dir"));
      testDir.mkdirs();
      File secretFile = new File(testDir, "secret.txt");
      try (Writer writer = new FileWriter(secretFile)) {
        writer.write("hadoop");
      }
      LdapGroupsMapping mapping = new LdapGroupsMapping();
      Assert.assertEquals("hadoop", mapping.extractPassword(secretFile.getPath()));
    }

    Class: org.apache.hadoop.security.TestPermission

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end HDFS permission test on a 3-datanode mini cluster:
     * - setOwner/setPermission on a nonexistent path throw FileNotFoundException;
     * - create() with an explicit FsPermission yields rwxr-xr-x (umask applied),
     *   default create() yields rw-r--r--;
     * - setPermission round-trips 700/755/744 and the file remains readable by
     *   the owner after writing FILE_LEN random bytes;
     * - a second (non-superuser) UGI cannot mkdir/create/open under restrictive
     *   parent permissions, and regains access (including rename across dirs)
     *   once the parents are opened up.
     * The cluster is always shut down in the finally block.
     */
    @Test public void testFilePermission() throws Exception { final Configuration conf=new HdfsConfiguration(); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,true); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster.waitActive(); try { FileSystem nnfs=FileSystem.get(conf); assertFalse(nnfs.exists(CHILD_FILE1)); try { nnfs.setOwner(CHILD_FILE1,"foo","bar"); assertTrue(false); } catch ( java.io.FileNotFoundException e) { LOG.info("GOOD: got " + e); } try { nnfs.setPermission(CHILD_FILE1,new FsPermission((short)0777)); assertTrue(false); } catch ( java.io.FileNotFoundException e) { LOG.info("GOOD: got " + e); } FSDataOutputStream out=nnfs.create(CHILD_FILE1,new FsPermission((short)0777),true,1024,(short)1,1024,null); FileStatus status=nnfs.getFileStatus(CHILD_FILE1); assertTrue(status.getPermission().toString().equals("rwxr-xr-x")); nnfs.delete(CHILD_FILE1,false); nnfs.mkdirs(CHILD_DIR1); out=nnfs.create(CHILD_FILE1); status=nnfs.getFileStatus(CHILD_FILE1); assertTrue(status.getPermission().toString().equals("rw-r--r--")); byte data[]=new byte[FILE_LEN]; RAN.nextBytes(data); out.write(data); out.close(); nnfs.setPermission(CHILD_FILE1,new FsPermission("700")); status=nnfs.getFileStatus(CHILD_FILE1); assertTrue(status.getPermission().toString().equals("rwx------")); byte dataIn[]=new byte[FILE_LEN]; FSDataInputStream fin=nnfs.open(CHILD_FILE1); int bytesRead=fin.read(dataIn); assertTrue(bytesRead == FILE_LEN); for (int i=0; i < FILE_LEN; i++) { assertEquals(data[i],dataIn[i]); } nnfs.setPermission(CHILD_FILE1,new FsPermission("755")); status=nnfs.getFileStatus(CHILD_FILE1); assertTrue(status.getPermission().toString().equals("rwxr-xr-x")); nnfs.setPermission(CHILD_FILE1,new FsPermission("744")); status=nnfs.getFileStatus(CHILD_FILE1); assertTrue(status.getPermission().toString().equals("rwxr--r--")); nnfs.setPermission(CHILD_FILE1,new FsPermission("700")); UserGroupInformation 
userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); FileSystem userfs=DFSTestUtil.getFileSystemAs(userGroupInfo,conf); userfs.mkdirs(CHILD_DIR1); assertTrue(!canMkdirs(userfs,CHILD_DIR2)); assertTrue(!canCreate(userfs,CHILD_FILE2)); assertTrue(!canOpen(userfs,CHILD_FILE1)); nnfs.setPermission(ROOT_PATH,new FsPermission((short)0755)); nnfs.setPermission(CHILD_DIR1,new FsPermission("777")); nnfs.setPermission(new Path("/"),new FsPermission((short)0777)); final Path RENAME_PATH=new Path("/foo/bar"); userfs.mkdirs(RENAME_PATH); assertTrue(canRename(userfs,RENAME_PATH,CHILD_DIR1)); } finally { cluster.shutdown(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Tests backward compatibility. Configuration can be either set with old
     * param dfs.umask that takes decimal umasks or dfs.umaskmode that takes
     * symbolic or octal umask. All four configuration paths must resolve to
     * umask 18 (decimal) == 022 (octal).
     */
    @Test
    public void testBackwardCompatibility() {
      // 1) Programmatic setter.
      Configuration conf = new Configuration();
      FsPermission.setUMask(conf, new FsPermission((short) 18));
      assertEquals(18, FsPermission.getUMask(conf).toShort());
      // 2) Deprecated decimal key alone.
      conf = new Configuration();
      conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
      assertEquals(18, FsPermission.getUMask(conf).toShort());
      // 3) Both keys set: the deprecated value (18) wins over the new key (000).
      conf = new Configuration();
      conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "18");
      conf.set(FsPermission.UMASK_LABEL, "000");
      assertEquals(18, FsPermission.getUMask(conf).toShort());
      // 4) New octal key alone: "022" octal == 18 decimal.
      conf = new Configuration();
      conf.set(FsPermission.UMASK_LABEL, "022");
      assertEquals(18, FsPermission.getUMask(conf).toShort());
    }

    Class: org.apache.hadoop.security.TestProxyUserFromEnv

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** * Test HADOOP_PROXY_USER for impersonation */ @Test public void testProxyUserFromEnvironment() throws IOException { String proxyUser="foo.bar"; System.setProperty(UserGroupInformation.HADOOP_PROXY_USER,proxyUser); UserGroupInformation ugi=UserGroupInformation.getLoginUser(); assertEquals(proxyUser,ugi.getUserName()); UserGroupInformation realUgi=ugi.getRealUser(); assertNotNull(realUgi); Process pp=Runtime.getRuntime().exec("whoami"); BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream())); String realUser=br.readLine().trim(); int backslashIndex=realUser.indexOf('\\'); if (backslashIndex != -1) { realUser=realUser.substring(backslashIndex + 1); } assertEquals(realUser,realUgi.getUserName()); }

    Class: org.apache.hadoop.security.TestRefreshUserMappings

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * User-to-groups cache refresh: two consecutive lookups return identical
     * (cached) groups; after "dfsadmin -refreshUserToGroupsMappings" the groups
     * must differ (the test group mapping changes on refresh); and after the
     * cache timeout (groupRefreshTimeoutSec, with 10% slack) they must change
     * again. Sleep-based, so timing-sensitive by design.
     */
    @Test public void testGroupMappingRefresh() throws Exception { DFSAdmin admin=new DFSAdmin(config); String[] args=new String[]{"-refreshUserToGroupsMappings"}; Groups groups=Groups.getUserToGroupsMappingService(config); String user=UserGroupInformation.getCurrentUser().getUserName(); System.out.println("first attempt:"); List g1=groups.getGroups(user); String[] str_groups=new String[g1.size()]; g1.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); System.out.println("second attempt, should be same:"); List g2=groups.getGroups(user); g2.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); for (int i=0; i < g2.size(); i++) { assertEquals("Should be same group ",g1.get(i),g2.get(i)); } admin.run(args); System.out.println("third attempt(after refresh command), should be different:"); List g3=groups.getGroups(user); g3.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); for (int i=0; i < g3.size(); i++) { assertFalse("Should be different group: " + g1.get(i) + " and "+ g3.get(i),g1.get(i).equals(g3.get(i))); } Thread.sleep(groupRefreshTimeoutSec * 1100); System.out.println("fourth attempt(after timeout), should be different:"); List g4=groups.getGroups(user); g4.toArray(str_groups); System.out.println(Arrays.toString(str_groups)); for (int i=0; i < g4.size(); i++) { assertFalse("Should be different group ",g3.get(i).equals(g4.get(i))); } }

    Class: org.apache.hadoop.security.TestSecurityUtil

    EqualityVerifier 
    /** _HOST in a principal is replaced with the host; other names pass through unresolved. */
    @Test
    public void testGetServerPrincipal() throws IOException {
      final String service = "hdfs/";
      final String realm = "@REALM";
      final String hostname = "foohost";
      final String userPrincipal = "foo@FOOREALM";
      final String withPattern = service + SecurityUtil.HOSTNAME_PATTERN + realm;
      final String substituted = service + hostname + realm;
      verify(withPattern, hostname, substituted);
      // A pattern followed by extra characters is not a substitution point.
      final String noPattern = service + SecurityUtil.HOSTNAME_PATTERN + "NAME" + realm;
      verify(noPattern, hostname, noPattern);
      verify(userPrincipal, hostname, userPrincipal);
      // When no substitution happens, the address must never be resolved.
      final InetAddress notUsed = Mockito.mock(InetAddress.class);
      assertEquals(noPattern, SecurityUtil.getServerPrincipal(noPattern, notUsed));
      Mockito.verify(notUsed, Mockito.never()).getCanonicalHostName();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** getAuthenticationMethod: unset defaults to simple; bad values raise a clear error. */
    @Test
    public void testGetAuthenticationMethod() {
      final Configuration conf = new Configuration();
      conf.unset(HADOOP_SECURITY_AUTHENTICATION);
      assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
      conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
      assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
      assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
      // An unknown method must surface as IllegalArgumentException naming the key.
      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
      String failure = null;
      try {
        SecurityUtil.getAuthenticationMethod(conf);
      } catch (Exception e) {
        failure = e.toString();
      }
      assertEquals("java.lang.IllegalArgumentException: "
          + "Invalid attribute value for " + HADOOP_SECURITY_AUTHENTICATION
          + " of kaboom", failure);
    }

    EqualityVerifier 
    /** With IP-based token services, every localhost spelling maps to 127.0.0.1:123. */
    @Test
    public void testBuildDTServiceName() {
      SecurityUtil.setTokenServiceUseIp(true);
      // The default port argument applies only when the URI carries no port.
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildDTServiceName(URI.create("test://LocalHost"), 123));
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildDTServiceName(URI.create("test://LocalHost:123"), 456));
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildDTServiceName(URI.create("test://127.0.0.1"), 123));
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildDTServiceName(URI.create("test://127.0.0.1:123"), 456));
    }

    InternalCallVerifier EqualityVerifier 
    /** setAuthenticationMethod writes the lowercase method name; null means simple. */
    @Test
    public void testSetAuthenticationMethod() {
      final Configuration conf = new Configuration();
      SecurityUtil.setAuthenticationMethod(null, conf);
      assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
      SecurityUtil.setAuthenticationMethod(SIMPLE, conf);
      assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
      SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
      assertEquals("kerberos", conf.get(HADOOP_SECURITY_AUTHENTICATION));
    }

    EqualityVerifier 
    /** The host component of service/host@realm is extracted; absent host yields null. */
    @Test
    public void testGetHostFromPrincipal() {
      assertEquals("host", SecurityUtil.getHostFromPrincipal("service/host@realm"));
      assertEquals(null, SecurityUtil.getHostFromPrincipal("service@realm"));
    }

    EqualityVerifier 
    /** _HOST resolves to the local hostname when the supplied host is null or 0.0.0.0. */
    @Test
    public void testLocalHostNameForNullOrWild() throws Exception {
      final String localHost = SecurityUtil.getLocalHostName().toLowerCase(Locale.US);
      assertEquals("hdfs/" + localHost + "@REALM",
          SecurityUtil.getServerPrincipal("hdfs/_HOST@REALM", (String) null));
      assertEquals("hdfs/" + localHost + "@REALM",
          SecurityUtil.getServerPrincipal("hdfs/_HOST@REALM", "0.0.0.0"));
    }

    EqualityVerifier 
    /** buildTokenService renders socket addresses as resolved "ip:port" strings. */
    @Test
    public void testBuildTokenServiceSockAddr() {
      SecurityUtil.setTokenServiceUseIp(true);
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildTokenService(new InetSocketAddress("LocalHost", 123)).toString());
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildTokenService(new InetSocketAddress("127.0.0.1", 123)).toString());
      assertEquals("127.0.0.1:123",
          SecurityUtil.buildTokenService(NetUtils.createSocketAddr("127.0.0.1", 123)).toString());
    }

    Class: org.apache.hadoop.security.TestUGIWithExternalKdc

    APIUtilityVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Keytab login against an external KDC (principal/keytab supplied via the
     * user.principal / user.keytab system properties): a valid login must use
     * KERBEROS auth, and a login with a bogus principal must fail.
     * NOTE(review): the failure branch only prints the stack trace; the test
     * fails via Assert.fail when no exception is thrown.
     */
    @Test public void testLogin() throws IOException { String userPrincipal=System.getProperty("user.principal"); String userKeyTab=System.getProperty("user.keytab"); Assert.assertNotNull("User principal was not specified",userPrincipal); Assert.assertNotNull("User keytab was not specified",userKeyTab); Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); UserGroupInformation ugi=UserGroupInformation.loginUserFromKeytabAndReturnUGI(userPrincipal,userKeyTab); Assert.assertEquals(AuthenticationMethod.KERBEROS,ugi.getAuthenticationMethod()); try { UserGroupInformation.loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM",userKeyTab); Assert.fail("Login should have failed"); } catch ( Exception ex) { ex.printStackTrace(); } }

    Class: org.apache.hadoop.security.TestUGIWithSecurityOn

    APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Keytab logins using the test KDC's nn1 and user1 keytabs (under
     * kdc.resource.dir): both must authenticate via KERBEROS, while a bogus
     * principal against a valid keytab must fail (enforced by Assert.fail if
     * no exception is raised).
     */
    @Test public void testLogin() throws IOException { String nn1keyTabFilepath=System.getProperty("kdc.resource.dir") + "/keytabs/nn1.keytab"; String user1keyTabFilepath=System.getProperty("kdc.resource.dir") + "/keytabs/user1.keytab"; Configuration conf=new Configuration(); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf); UserGroupInformation.setConfiguration(conf); UserGroupInformation ugiNn=UserGroupInformation.loginUserFromKeytabAndReturnUGI("nn1/localhost@EXAMPLE.COM",nn1keyTabFilepath); UserGroupInformation ugiDn=UserGroupInformation.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",user1keyTabFilepath); Assert.assertEquals(AuthenticationMethod.KERBEROS,ugiNn.getAuthenticationMethod()); Assert.assertEquals(AuthenticationMethod.KERBEROS,ugiDn.getAuthenticationMethod()); try { UserGroupInformation.loginUserFromKeytabAndReturnUGI("bogus@EXAMPLE.COM",nn1keyTabFilepath); Assert.fail("Login should have failed"); } catch ( Exception ex) { ex.printStackTrace(); } }

    BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * After a keytab login, the JAAS Subject must contain a KerberosPrincipal,
     * and a UGI rebuilt from that Subject via getUGIFromSubject must, inside
     * doAs, report the original principal name ("user1@EXAMPLE.COM").
     * NOTE(review): the inner catch swallows IOException (prints only); the
     * real check is the assertEquals inside run().
     */
    @Test public void testGetUGIFromKerberosSubject() throws IOException { String user1keyTabFilepath=System.getProperty("kdc.resource.dir") + "/keytabs/user1.keytab"; UserGroupInformation ugi=UserGroupInformation.loginUserFromKeytabAndReturnUGI("user1@EXAMPLE.COM",user1keyTabFilepath); Set principals=ugi.getSubject().getPrincipals(KerberosPrincipal.class); if (principals.isEmpty()) { Assert.fail("There should be a kerberos principal in the subject."); } else { UserGroupInformation ugi2=UserGroupInformation.getUGIFromSubject(ugi.getSubject()); if (ugi2 != null) { ugi2.doAs(new PrivilegedAction(){ @Override public Object run(){ try { UserGroupInformation ugi3=UserGroupInformation.getCurrentUser(); String doAsUserName=ugi3.getUserName(); assertEquals(doAsUserName,"user1@EXAMPLE.COM"); System.out.println("DO AS USERNAME: " + doAsUserName); } catch ( IOException e) { e.printStackTrace(); } return null; } } ); } } }

    Class: org.apache.hadoop.security.TestUserFromEnv

    EqualityVerifier 
    @Test public void testUserFromEnvironment() throws IOException { System.setProperty(UserGroupInformation.HADOOP_USER_NAME,"randomUser"); Assert.assertEquals("randomUser",UserGroupInformation.getLoginUser().getUserName()); }

    Class: org.apache.hadoop.security.TestUserGroupInformation

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * given user name - get all the groups. * Needs to happen before creating the test users */ @Test(timeout=30000) public void testGetServerSideGroups() throws IOException, InterruptedException { Process pp=Runtime.getRuntime().exec("whoami"); BufferedReader br=new BufferedReader(new InputStreamReader(pp.getInputStream())); String userName=br.readLine().trim(); if (Shell.WINDOWS) { int sp=userName.lastIndexOf('\\'); if (sp != -1) { userName=userName.substring(sp + 1); } userName=userName.toLowerCase(); } pp=Runtime.getRuntime().exec(Shell.WINDOWS ? Shell.WINUTILS + " groups -F" : "id -Gn"); br=new BufferedReader(new InputStreamReader(pp.getInputStream())); String line=br.readLine(); System.out.println(userName + ":" + line); Set groups=new LinkedHashSet(); String[] tokens=line.split(Shell.TOKEN_SEPARATOR_REGEX); for ( String s : tokens) { groups.add(s); } final UserGroupInformation login=UserGroupInformation.getCurrentUser(); String loginUserName=login.getShortUserName(); if (Shell.WINDOWS) { loginUserName=loginUserName.toLowerCase(); } assertEquals(userName,loginUserName); String[] gi=login.getGroupNames(); assertEquals(groups.size(),gi.length); for (int i=0; i < gi.length; i++) { assertTrue(groups.contains(gi[i])); } final UserGroupInformation fakeUser=UserGroupInformation.createRemoteUser("foo.bar"); fakeUser.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws IOException { UserGroupInformation current=UserGroupInformation.getCurrentUser(); assertFalse(current.equals(login)); assertEquals(current,fakeUser); assertEquals(0,current.getGroupNames().length); return null; } } ); }

    APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testGetUGIFromSubject() throws Exception { KerberosPrincipal p=new KerberosPrincipal("guest"); Subject subject=new Subject(); subject.getPrincipals().add(p); UserGroupInformation ugi=UserGroupInformation.getUGIFromSubject(subject); assertNotNull(ugi); assertEquals("guest@DEFAULT.REALM",ugi.getUserName()); }

    BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testEnsureInitWithRules() throws IOException { String rules="RULE:[1:RULE1]"; UserGroupInformation.reset(); assertFalse(KerberosName.hasRulesBeenSet()); UserGroupInformation.createUserForTesting("someone",new String[0]); assertTrue(KerberosName.hasRulesBeenSet()); UserGroupInformation.reset(); KerberosName.setRules(rules); assertTrue(KerberosName.hasRulesBeenSet()); assertEquals(rules,KerberosName.getRules()); UserGroupInformation.createUserForTesting("someone",new String[0]); assertEquals(rules,KerberosName.getRules()); }

    BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testSetConfigWithRules(){ String[] rules={"RULE:[1:TEST1]","RULE:[1:TEST2]","RULE:[1:TEST3]"}; UserGroupInformation.reset(); assertFalse(KerberosName.hasRulesBeenSet()); KerberosName.setRules(rules[0]); assertTrue(KerberosName.hasRulesBeenSet()); assertEquals(rules[0],KerberosName.getRules()); UserGroupInformation.createUserForTesting("someone",new String[0]); assertEquals(rules[0],KerberosName.getRules()); conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL,rules[1]); UserGroupInformation.setConfiguration(conf); assertEquals(rules[1],KerberosName.getRules()); conf.set(HADOOP_SECURITY_AUTH_TO_LOCAL,rules[2]); UserGroupInformation.setConfiguration(conf); assertEquals(rules[2],KerberosName.getRules()); UserGroupInformation.createUserForTesting("someone",new String[0]); assertEquals(rules[2],KerberosName.getRules()); }

    UtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    @SuppressWarnings("unchecked") @Test(timeout=30000) public void testUGITokens() throws Exception { UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"}); Token t1=mock(Token.class); when(t1.getService()).thenReturn(new Text("t1")); Token t2=mock(Token.class); when(t2.getService()).thenReturn(new Text("t2")); Credentials creds=new Credentials(); byte[] secretKey=new byte[]{}; Text secretName=new Text("shhh"); creds.addSecretKey(secretName,secretKey); ugi.addToken(t1); ugi.addToken(t2); ugi.addCredentials(creds); Collection> z=ugi.getTokens(); assertTrue(z.contains(t1)); assertTrue(z.contains(t2)); assertEquals(2,z.size()); Credentials ugiCreds=ugi.getCredentials(); assertSame(secretKey,ugiCreds.getSecretKey(secretName)); assertEquals(1,ugiCreds.numberOfSecretKeys()); try { z.remove(t1); fail("Shouldn't be able to modify token collection from UGI"); } catch ( UnsupportedOperationException uoe) { } Collection> otherSet=ugi.doAs(new PrivilegedExceptionAction>>(){ @Override public Collection> run() throws IOException { return UserGroupInformation.getCurrentUser().getTokens(); } } ); assertTrue(otherSet.contains(t1)); assertTrue(otherSet.contains(t2)); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * Test login method */ @Test(timeout=30000) public void testLogin() throws Exception { conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,String.valueOf(PERCENTILES_INTERVAL)); UserGroupInformation.setConfiguration(conf); UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); assertEquals(UserGroupInformation.getCurrentUser(),UserGroupInformation.getLoginUser()); assertTrue(ugi.getGroupNames().length >= 1); verifyGroupMetrics(1); UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); UserGroupInformation curUGI=userGroupInfo.doAs(new PrivilegedExceptionAction(){ @Override public UserGroupInformation run() throws IOException { return UserGroupInformation.getCurrentUser(); } } ); assertEquals(curUGI,userGroupInfo); assertFalse(curUGI.equals(UserGroupInformation.getLoginUser())); }

    InternalCallVerifier EqualityVerifier 
    /** * In some scenario, such as HA, delegation tokens are associated with a * logical name. The tokens are cloned and are associated with the * physical address of the server where the service is provided. * This test ensures cloned delegated tokens are locally used * and are not returned in {@link UserGroupInformation#getCredentials()} */ @Test public void testPrivateTokenExclusion() throws Exception { UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); TestTokenIdentifier tokenId=new TestTokenIdentifier(); Token token=new Token(tokenId.getBytes(),"password".getBytes(),tokenId.getKind(),null); ugi.addToken(new Text("regular-token"),token); ugi.addToken(new Text("private-token"),new Token.PrivateToken(token)); ugi.addToken(new Text("private-token1"),new Token.PrivateToken(token)); Collection> tokens=ugi.getCredentials().getAllTokens(); assertEquals(1,tokens.size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testGettingGroups() throws Exception { UserGroupInformation uugi=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); assertEquals(USER_NAME,uugi.getUserName()); assertArrayEquals(new String[]{GROUP1_NAME,GROUP2_NAME,GROUP3_NAME},uugi.getGroupNames()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testUGIAuthMethodInRealUser() throws Exception { final UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); UserGroupInformation proxyUgi=UserGroupInformation.createProxyUser("proxy",ugi); final AuthenticationMethod am=AuthenticationMethod.KERBEROS; ugi.setAuthenticationMethod(am); Assert.assertEquals(am,ugi.getAuthenticationMethod()); Assert.assertEquals(AuthenticationMethod.PROXY,proxyUgi.getAuthenticationMethod()); Assert.assertEquals(am,UserGroupInformation.getRealAuthenticationMethod(proxyUgi)); proxyUgi.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws IOException { Assert.assertEquals(AuthenticationMethod.PROXY,UserGroupInformation.getCurrentUser().getAuthenticationMethod()); Assert.assertEquals(am,UserGroupInformation.getCurrentUser().getRealUser().getAuthenticationMethod()); return null; } } ); UserGroupInformation proxyUgi2=new UserGroupInformation(proxyUgi.getSubject()); proxyUgi2.setAuthenticationMethod(AuthenticationMethod.PROXY); Assert.assertEquals(proxyUgi,proxyUgi2); UserGroupInformation realugi=UserGroupInformation.getCurrentUser(); UserGroupInformation proxyUgi3=UserGroupInformation.createProxyUser("proxyAnother",realugi); UserGroupInformation proxyUgi4=new UserGroupInformation(proxyUgi3.getSubject()); Assert.assertEquals(proxyUgi3,proxyUgi4); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testEqualsWithRealUser() throws Exception { UserGroupInformation realUgi1=UserGroupInformation.createUserForTesting("RealUser",GROUP_NAMES); UserGroupInformation proxyUgi1=UserGroupInformation.createProxyUser(USER_NAME,realUgi1); UserGroupInformation proxyUgi2=new UserGroupInformation(proxyUgi1.getSubject()); UserGroupInformation remoteUgi=UserGroupInformation.createRemoteUser(USER_NAME); assertEquals(proxyUgi1,proxyUgi2); assertFalse(remoteUgi.equals(proxyUgi1)); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testUGIAuthMethod() throws Exception { final UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); final AuthenticationMethod am=AuthenticationMethod.KERBEROS; ugi.setAuthenticationMethod(am); Assert.assertEquals(am,ugi.getAuthenticationMethod()); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Object run() throws IOException { Assert.assertEquals(am,UserGroupInformation.getCurrentUser().getAuthenticationMethod()); return null; } } ); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testCreateRemoteUser(){ UserGroupInformation ugi=UserGroupInformation.createRemoteUser("user1"); assertEquals(AuthenticationMethod.SIMPLE,ugi.getAuthenticationMethod()); assertTrue(ugi.toString().contains("(auth:SIMPLE)")); ugi=UserGroupInformation.createRemoteUser("user1",AuthMethod.KERBEROS); assertEquals(AuthenticationMethod.KERBEROS,ugi.getAuthenticationMethod()); assertTrue(ugi.toString().contains("(auth:KERBEROS)")); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testTokenIdentifiers() throws Exception { UserGroupInformation ugi=UserGroupInformation.createUserForTesting("TheDoctor",new String[]{"TheTARDIS"}); TokenIdentifier t1=mock(TokenIdentifier.class); TokenIdentifier t2=mock(TokenIdentifier.class); ugi.addTokenIdentifier(t1); ugi.addTokenIdentifier(t2); Collection z=ugi.getTokenIdentifiers(); assertTrue(z.contains(t1)); assertTrue(z.contains(t2)); assertEquals(2,z.size()); Collection otherSet=ugi.doAs(new PrivilegedExceptionAction>(){ @Override public Collection run() throws IOException { return UserGroupInformation.getCurrentUser().getTokenIdentifiers(); } } ); assertTrue(otherSet.contains(t1)); assertTrue(otherSet.contains(t2)); assertEquals(2,otherSet.size()); }

    IterativeVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** * This test checks a race condition between getting and adding tokens for * the current user. Calling UserGroupInformation.getCurrentUser() returns * a new object each time, so simply making these methods synchronized was not * enough to prevent race conditions and causing a * ConcurrentModificationException. These methods are synchronized on the * Subject, which is the same object between UserGroupInformation instances. * This test tries to cause a CME, by exposing the race condition. Previously * this test would fail every time; now it does not. */ @Test public void testTokenRaceCondition() throws Exception { UserGroupInformation userGroupInfo=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); userGroupInfo.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { assertNotEquals(UserGroupInformation.getLoginUser(),UserGroupInformation.getCurrentUser()); GetTokenThread thread=new GetTokenThread(); try { thread.start(); for (int i=0; i < 100; i++) { @SuppressWarnings("unchecked") Token t=mock(Token.class); when(t.getService()).thenReturn(new Text("t" + i)); UserGroupInformation.getCurrentUser().addToken(t); assertNull("ConcurrentModificationException encountered",thread.cme); } } catch ( ConcurrentModificationException cme) { cme.printStackTrace(); fail("ConcurrentModificationException encountered"); } finally { thread.runThread=false; thread.join(5 * 1000); } return null; } } ); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testGetRealAuthenticationMethod(){ UserGroupInformation ugi=UserGroupInformation.createRemoteUser("user1"); ugi.setAuthenticationMethod(AuthenticationMethod.SIMPLE); assertEquals(AuthenticationMethod.SIMPLE,ugi.getAuthenticationMethod()); assertEquals(AuthenticationMethod.SIMPLE,ugi.getRealAuthenticationMethod()); ugi=UserGroupInformation.createProxyUser("user2",ugi); assertEquals(AuthenticationMethod.PROXY,ugi.getAuthenticationMethod()); assertEquals(AuthenticationMethod.SIMPLE,ugi.getRealAuthenticationMethod()); }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testTestAuthMethod() throws Exception { UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); for ( AuthenticationMethod am : AuthenticationMethod.values()) { if (am.getAuthMethod() != null) { ugi.setAuthenticationMethod(am.getAuthMethod()); assertEquals(am,ugi.getAuthenticationMethod()); } } }

    EqualityVerifier 
    @Test(timeout=1000) public void testSetLoginUser() throws IOException { UserGroupInformation ugi=UserGroupInformation.createRemoteUser("test-user"); UserGroupInformation.setLoginUser(ugi); assertEquals(ugi,UserGroupInformation.getLoginUser()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testEquals() throws Exception { UserGroupInformation uugi=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); assertEquals(uugi,uugi); UserGroupInformation ugi2=UserGroupInformation.createUserForTesting(USER_NAME,GROUP_NAMES); assertFalse(uugi.equals(ugi2)); assertFalse(uugi.hashCode() == ugi2.hashCode()); UserGroupInformation ugi3=new UserGroupInformation(uugi.getSubject()); assertEquals(uugi,ugi3); assertEquals(uugi.hashCode(),ugi3.hashCode()); }

    Class: org.apache.hadoop.security.alias.TestCredShell

    InternalCallVerifier EqualityVerifier 
    @Test public void testCommandHelpExitsNormally() throws Exception { for ( String cmd : Arrays.asList("create","list","delete")) { CredentialShell shell=new CredentialShell(); shell.setConf(new Configuration()); assertEquals("Expected help argument on " + cmd + " to return 0",0,shell.init(new String[]{cmd,"-help"})); } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testInvalidProvider() throws Exception { String[] args1={"create","credential1","-value","p@ssw0rd","-provider","sdff://file/tmp/credstore.jceks"}; int rc=0; CredentialShell cs=new CredentialShell(); cs.setConf(new Configuration()); rc=cs.run(args1); assertEquals(1,rc); assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured.")); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testPromptForCredential() throws Exception { String[] args1={"create","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; ArrayList passwords=new ArrayList(); passwords.add("p@ssw0rd"); passwords.add("p@ssw0rd"); int rc=0; CredentialShell shell=new CredentialShell(); shell.setConf(new Configuration()); shell.setPasswordReader(new MockPasswordReader(passwords)); rc=shell.run(args1); assertEquals(0,rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + "created.")); String[] args2={"delete","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; rc=shell.run(args2); assertEquals(0,rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted.")); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testEmptyArgList() throws Exception { CredentialShell shell=new CredentialShell(); shell.setConf(new Configuration()); assertEquals(1,shell.init(new String[0])); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testTransientProviderOnlyConfig() throws Exception { String[] args1={"create","credential1"}; int rc=0; CredentialShell cs=new CredentialShell(); Configuration config=new Configuration(); config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,"user:///"); cs.setConf(config); rc=cs.run(args1); assertEquals(1,rc); assertTrue(outContent.toString().contains("There are no valid " + "CredentialProviders configured.")); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testCredentialSuccessfulLifecycle() throws Exception { outContent.reset(); String[] args1={"create","credential1","-value","p@ssw0rd","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; int rc=0; CredentialShell cs=new CredentialShell(); cs.setConf(new Configuration()); rc=cs.run(args1); assertEquals(outContent.toString(),0,rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + "created.")); outContent.reset(); String[] args2={"list","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; rc=cs.run(args2); assertEquals(0,rc); assertTrue(outContent.toString().contains("credential1")); outContent.reset(); String[] args4={"delete","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; rc=cs.run(args4); assertEquals(0,rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted.")); outContent.reset(); String[] args5={"list","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; rc=cs.run(args5); assertEquals(0,rc); assertFalse(outContent.toString(),outContent.toString().contains("credential1")); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testTransientProviderWarning() throws Exception { String[] args1={"create","credential1","-value","p@ssw0rd","-provider","user:///"}; int rc=0; CredentialShell cs=new CredentialShell(); cs.setConf(new Configuration()); rc=cs.run(args1); assertEquals(outContent.toString(),0,rc); assertTrue(outContent.toString().contains("WARNING: you are modifying a " + "transient provider.")); String[] args2={"delete","credential1","-provider","user:///"}; rc=cs.run(args2); assertEquals(outContent.toString(),0,rc); assertTrue(outContent.toString().contains("credential1 has been successfully " + "deleted.")); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testPromptForCredentialWithEmptyPasswd() throws Exception { String[] args1={"create","credential1","-provider","jceks://file" + tmpDir + "/credstore.jceks"}; ArrayList passwords=new ArrayList(); passwords.add(null); passwords.add("p@ssw0rd"); int rc=0; CredentialShell shell=new CredentialShell(); shell.setConf(new Configuration()); shell.setPasswordReader(new MockPasswordReader(passwords)); rc=shell.run(args1); assertEquals(outContent.toString(),1,rc); assertTrue(outContent.toString().contains("Passwords don't match")); }

    Class: org.apache.hadoop.security.alias.TestCredentialProvider

    InternalCallVerifier EqualityVerifier 
    @Test public void testCredentialEntry() throws Exception { char[] key1=new char[]{1,2,3,4}; CredentialProvider.CredentialEntry obj=new CredentialProvider.CredentialEntry("cred1",key1); assertEquals("cred1",obj.getAlias()); assertArrayEquals(new char[]{1,2,3,4},obj.getCredential()); }

    EqualityVerifier 
    @Test public void testUnnestUri() throws Exception { assertEquals(new Path("hdfs://nn.example.com/my/path"),ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn.example.com/my/path"))); assertEquals(new Path("hdfs://nn/my/path?foo=bar&baz=bat#yyy"),ProviderUtils.unnestUri(new URI("myscheme://hdfs@nn/my/path?foo=bar&baz=bat#yyy"))); assertEquals(new Path("inner://hdfs@nn1.example.com/my/path"),ProviderUtils.unnestUri(new URI("outer://inner@hdfs@nn1.example.com/my/path"))); assertEquals(new Path("user:///"),ProviderUtils.unnestUri(new URI("outer://user/"))); }

    Class: org.apache.hadoop.security.alias.TestCredentialProviderFactory

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testFactory() throws Exception { Configuration conf=new Configuration(); conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,UserProvider.SCHEME_NAME + ":///," + JavaKeyStoreProvider.SCHEME_NAME+ "://file"+ tmpDir+ "/test.jks"); List providers=CredentialProviderFactory.getProviders(conf); assertEquals(2,providers.size()); assertEquals(UserProvider.class,providers.get(0).getClass()); assertEquals(JavaKeyStoreProvider.class,providers.get(1).getClass()); assertEquals(UserProvider.SCHEME_NAME + ":///",providers.get(0).toString()); assertEquals(JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir+ "/test.jks",providers.get(1).toString()); }

    BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testFactoryErrors() throws Exception { Configuration conf=new Configuration(); conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,"unknown:///"); try { List providers=CredentialProviderFactory.getProviders(conf); assertTrue("should throw!",false); } catch ( IOException e) { assertEquals("No CredentialProviderFactory for unknown:/// in " + CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,e.getMessage()); } }

    BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testUriErrors() throws Exception { Configuration conf=new Configuration(); conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,"unkn@own:/x/y"); try { List providers=CredentialProviderFactory.getProviders(conf); assertTrue("should throw!",false); } catch ( IOException e) { assertEquals("Bad configuration of " + CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH + " at unkn@own:/x/y",e.getMessage()); } }

    EqualityVerifier 
    @Test public void testUserProvider() throws Exception { Configuration conf=new Configuration(); final String ourUrl=UserProvider.SCHEME_NAME + ":///"; conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,ourUrl); checkSpecificProvider(conf,ourUrl); Credentials credentials=UserGroupInformation.getCurrentUser().getCredentials(); assertArrayEquals(new byte[]{'1','2','3'},credentials.getSecretKey(new Text("pass2"))); }

    Class: org.apache.hadoop.security.authentication.client.TestAuthenticatedURL

    EqualityVerifier 
    @Test public void testGetAuthenticator() throws Exception { Authenticator authenticator=Mockito.mock(Authenticator.class); AuthenticatedURL aURL=new AuthenticatedURL(authenticator); Assert.assertEquals(authenticator,aURL.getAuthenticator()); }

    EqualityVerifier 
    @Test public void testExtractTokenOK() throws Exception { HttpURLConnection conn=Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getResponseCode()).thenReturn(HttpURLConnection.HTTP_OK); String tokenStr="foo"; Map> headers=new HashMap>(); List cookies=new ArrayList(); cookies.add(AuthenticatedURL.AUTH_COOKIE + "=" + tokenStr); headers.put("Set-Cookie",cookies); Mockito.when(conn.getHeaderFields()).thenReturn(headers); AuthenticatedURL.Token token=new AuthenticatedURL.Token(); AuthenticatedURL.extractToken(conn,token); Assert.assertEquals(tokenStr,token.toString()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testToken() throws Exception { AuthenticatedURL.Token token=new AuthenticatedURL.Token(); Assert.assertFalse(token.isSet()); token=new AuthenticatedURL.Token("foo"); Assert.assertTrue(token.isSet()); Assert.assertEquals("foo",token.toString()); }

    Class: org.apache.hadoop.security.authentication.client.TestKerberosAuthenticator

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=60000) public void testNotAuthenticated() throws Exception { AuthenticatorTestCase auth=new AuthenticatorTestCase(); AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); auth.start(); try { URL url=new URL(auth.getBaseURL()); HttpURLConnection conn=(HttpURLConnection)url.openConnection(); conn.connect(); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); Assert.assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null); } finally { auth.stop(); } }

    Class: org.apache.hadoop.security.authentication.client.TestPseudoAuthenticator

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testAnonymousDisallowed() throws Exception { AuthenticatorTestCase auth=new AuthenticatorTestCase(); AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); auth.start(); try { URL url=new URL(auth.getBaseURL()); HttpURLConnection conn=(HttpURLConnection)url.openConnection(); conn.connect(); Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode()); Assert.assertEquals("Anonymous requests are disallowed",conn.getResponseMessage()); } finally { auth.stop(); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testAnonymousAllowed() throws Exception { AuthenticatorTestCase auth=new AuthenticatorTestCase(); AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); auth.start(); try { URL url=new URL(auth.getBaseURL()); HttpURLConnection conn=(HttpURLConnection)url.openConnection(); conn.connect(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); } finally { auth.stop(); } }

    EqualityVerifier 
    @Test public void testGetUserName() throws Exception { PseudoAuthenticator authenticator=new PseudoAuthenticator(); Assert.assertEquals(System.getProperty("user.name"),authenticator.getUserName()); }

    Class: org.apache.hadoop.security.authentication.server.TestAltKerberosAuthenticationHandler

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception { HttpServletRequest request=Mockito.mock(HttpServletRequest.class); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); if (handler != null) { handler.destroy(); handler=null; } handler=getNewAuthenticationHandler(); Properties props=getDefaultProperties(); props.setProperty("alt-kerberos.non-browser.user-agents","foo, bar"); try { handler.init(props); } catch ( Exception ex) { handler=null; throw ex; } Mockito.when(request.getHeader("User-Agent")).thenReturn("blah"); AuthenticationToken token=handler.authenticate(request,response); Assert.assertEquals("A",token.getUserName()); Assert.assertEquals("B",token.getName()); Assert.assertEquals(getExpectedType(),token.getType()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=60000) public void testAlternateAuthenticationAsBrowser() throws Exception { HttpServletRequest request=Mockito.mock(HttpServletRequest.class); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); Mockito.when(request.getHeader("User-Agent")).thenReturn("Some Browser"); AuthenticationToken token=handler.authenticate(request,response); Assert.assertEquals("A",token.getUserName()); Assert.assertEquals("B",token.getName()); Assert.assertEquals(getExpectedType(),token.getType()); }

    Class: org.apache.hadoop.security.authentication.server.TestAuthenticationFilter

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testInitEmpty() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements()); filter.init(config); Assert.fail(); } catch ( ServletException ex) { Assert.assertEquals("Authentication type must be specified: simple|kerberos|",ex.getMessage()); } catch ( Exception ex) { Assert.fail(); } finally { filter.destroy(); } }

    BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetTokenExpired() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); AuthenticationToken token=new AuthenticationToken("u","p",DummyAuthenticationHandler.TYPE); token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC); Signer signer=new Signer(new StringSignerSecretProvider("secret")); String tokenSigned=signer.sign(token.toString()); Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie}); boolean failed=false; try { filter.getToken(request); } catch ( AuthenticationException ex) { Assert.assertEquals("AuthenticationToken expired",ex.getMessage()); failed=true; } finally { Assert.assertTrue("token not expired",failed); } } finally { filter.destroy(); } }

    InternalCallVerifier EqualityVerifier 
    @Test public void testGetConfiguration() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn(""); Mockito.when(config.getInitParameter("a")).thenReturn("A"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements()); Properties props=filter.getConfiguration("",config); Assert.assertEquals("A",props.getProperty("a")); config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo"); Mockito.when(config.getInitParameter("foo.a")).thenReturn("A"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements()); props=filter.getConfiguration("foo.",config); Assert.assertEquals("A",props.getProperty("a")); }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises AuthenticationFilter.init() across six configurations, in order:
     * (1) "simple" auth type with a token validity but no signature secret:
     *     expects the PseudoAuthenticationHandler, a random secret, no custom
     *     signer provider, null cookie domain/path, and the configured validity;
     * (2) "simple" with an explicit SIGNATURE_SECRET: secret is no longer
     *     random, provider still not custom;
     * (3) same, but with an anonymous SignerSecretProvider installed as a
     *     ServletContext attribute: provider is reported as custom;
     * (4) cookie domain ".foo.com" and path "/bar" are surfaced by the getters;
     * (5) DummyAuthenticationHandler as AUTH_TYPE: the handler's init flag is
     *     set, and its destroy flag is set after filter.destroy();
     * (6) "kerberos" without credentials: init may throw ServletException
     *     (deliberately swallowed) but the handler class must already be
     *     KerberosAuthenticationHandler.
     * Each phase builds a fresh mocked FilterConfig/ServletContext and always
     * destroys the filter in a finally block so phases stay independent.
     * NOTE(review): phase ordering and the empty catch in phase (6) are
     * intentional; do not reorder or "fix" the swallowed exception.
     */
    @Test public void testInit() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn((new Long(TOKEN_VALIDITY_SEC)).toString()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertEquals(PseudoAuthenticationHandler.class,filter.getAuthenticationHandler().getClass()); Assert.assertTrue(filter.isRandomSecret()); Assert.assertFalse(filter.isCustomSignerSecretProvider()); Assert.assertNull(filter.getCookieDomain()); Assert.assertNull(filter.getCookiePath()); Assert.assertEquals(TOKEN_VALIDITY_SEC,filter.getValidity()); } finally { filter.destroy(); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertFalse(filter.isRandomSecret()); Assert.assertFalse(filter.isCustomSignerSecretProvider()); } 
finally { filter.destroy(); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(new SignerSecretProvider(){ @Override public void init( Properties config, long tokenValidity){ } @Override public byte[] getCurrentSecret(){ return null; } @Override public byte[][] getAllSecrets(){ return null; } } ); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertFalse(filter.isRandomSecret()); Assert.assertTrue(filter.isCustomSignerSecretProvider()); } finally { filter.destroy(); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple"); Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com"); Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.COOKIE_DOMAIN,AuthenticationFilter.COOKIE_PATH)).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertEquals(".foo.com",filter.getCookieDomain()); 
Assert.assertEquals("/bar",filter.getCookiePath()); } finally { filter.destroy(); } DummyAuthenticationHandler.reset(); filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); Assert.assertTrue(DummyAuthenticationHandler.init); } finally { filter.destroy(); Assert.assertTrue(DummyAuthenticationHandler.destroy); } filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements()); filter.init(config); } catch ( ServletException ex) { } finally { Assert.assertEquals(KerberosAuthenticationHandler.class,filter.getAuthenticationHandler().getClass()); filter.destroy(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * A valid signed authentication cookie on the request must be parsed back
     * by filter.getToken() into a token equal to the one that was signed.
     */
    @Test
    public void testGetToken() throws Exception {
        AuthenticationFilter filter = new AuthenticationFilter();
        try {
            // Configure the filter with the dummy handler and a fixed secret.
            FilterConfig config = Mockito.mock(FilterConfig.class);
            Mockito.when(config.getInitParameter("management.operation.return"))
                .thenReturn("true");
            Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE))
                .thenReturn(DummyAuthenticationHandler.class.getName());
            Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET))
                .thenReturn("secret");
            Mockito.when(config.getInitParameterNames()).thenReturn(
                new Vector(Arrays.asList(
                    AuthenticationFilter.AUTH_TYPE,
                    AuthenticationFilter.SIGNATURE_SECRET,
                    "management.operation.return")).elements());
            ServletContext context = Mockito.mock(ServletContext.class);
            Mockito.when(context.getAttribute(
                AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
            Mockito.when(config.getServletContext()).thenReturn(context);
            filter.init(config);

            // Sign a token with the same secret and attach it as a cookie.
            AuthenticationToken token =
                new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
            token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
            Signer signer = new Signer(new StringSignerSecretProvider("secret"));
            String tokenSigned = signer.sign(token.toString());
            Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
            HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
            Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});

            // Round trip: cookie -> token.
            AuthenticationToken newToken = filter.getToken(request);
            Assert.assertEquals(token.toString(), newToken.toString());
        } finally {
            filter.destroy();
        }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * The auth-type lookup must be case-insensitive: "SimPle" should still
     * resolve to PseudoAuthenticationHandler.
     */
    @Test
    public void testInitCaseSensitivity() throws Exception {
        AuthenticationFilter filter = new AuthenticationFilter();
        try {
            FilterConfig config = Mockito.mock(FilterConfig.class);
            // Mixed-case auth type on purpose.
            Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE))
                .thenReturn("SimPle");
            Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY))
                .thenReturn(Long.toString(TOKEN_VALIDITY_SEC));
            Mockito.when(config.getInitParameterNames()).thenReturn(
                new Vector(Arrays.asList(
                    AuthenticationFilter.AUTH_TYPE,
                    AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
            ServletContext context = Mockito.mock(ServletContext.class);
            Mockito.when(context.getAttribute(
                AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
            Mockito.when(config.getServletContext()).thenReturn(context);
            filter.init(config);
            Assert.assertEquals(PseudoAuthenticationHandler.class,
                filter.getAuthenticationHandler().getClass());
        } finally {
            filter.destroy();
        }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * getRequestURL() must concatenate the request URL and the query string
     * with a '?' separator.
     */
    @Test
    public void testGetRequestURL() throws Exception {
        AuthenticationFilter filter = new AuthenticationFilter();
        try {
            FilterConfig config = Mockito.mock(FilterConfig.class);
            Mockito.when(config.getInitParameter("management.operation.return"))
                .thenReturn("true");
            Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE))
                .thenReturn(DummyAuthenticationHandler.class.getName());
            Mockito.when(config.getInitParameterNames()).thenReturn(
                new Vector(Arrays.asList(
                    AuthenticationFilter.AUTH_TYPE,
                    "management.operation.return")).elements());
            ServletContext context = Mockito.mock(ServletContext.class);
            Mockito.when(context.getAttribute(
                AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
            Mockito.when(config.getServletContext()).thenReturn(context);
            filter.init(config);

            HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
            Mockito.when(request.getRequestURL())
                .thenReturn(new StringBuffer("http://foo:8080/bar"));
            Mockito.when(request.getQueryString()).thenReturn("a=A&b=B");
            Assert.assertEquals("http://foo:8080/bar?a=A&b=B",
                filter.getRequestURL(request));
        } finally {
            filter.destroy();
        }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * When the request carries a valid signed auth cookie, doFilter() must
     * invoke the chain with a wrapped request whose getRemoteUser() and
     * getUserPrincipal() reflect the token's user ("u"/"p"). The checks live
     * inside a Mockito Answer on chain.doFilter so they run against the
     * wrapped request the filter actually passes downstream.
     * NOTE(review): the filter is configured without an explicit signature
     * secret while the cookie is signed with "secret" — presumably the dummy
     * handler path makes this verifiable; confirm against AuthenticationFilter
     * before changing the setup.
     */
    @Test public void testDoFilterAuthenticated() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar")); AuthenticationToken token=new AuthenticationToken("u","p","t"); token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC); Signer signer=new Signer(new StringSignerSecretProvider("secret")); String tokenSigned=signer.sign(token.toString()); Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie}); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); FilterChain chain=Mockito.mock(FilterChain.class); Mockito.doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocation) throws Throwable { Object[] args=invocation.getArguments(); HttpServletRequest request=(HttpServletRequest)args[0]; Assert.assertEquals("u",request.getRemoteUser()); Assert.assertEquals("p",request.getUserPrincipal().getName()); return null; } } ).when(chain).doFilter(Mockito.anyObject(),Mockito.anyObject()); filter.doFilter(request,response,chain); } finally { filter.destroy(); } }

    APIUtilityVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * When authentication fails (the dummy handler rejects the "dummyauth"
     * WWW-Authenticate header), doFilter() must:
     *  - NOT invoke the downstream chain (the second Answer fails the test if
     *    it runs);
     *  - send SC_FORBIDDEN with message "AUTH FAILED";
     *  - never set a WWW-Authenticate response header; and
     *  - clear the auth cookie, verified by capturing Set-Cookie headers into
     *    cookieMap via the first Answer and asserting the auth cookie's value
     *    is the empty string.
     */
    @Test public void testDoFilterAuthenticationFailure() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar")); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{}); Mockito.when(request.getHeader("WWW-Authenticate")).thenReturn("dummyauth"); HttpServletResponse response=Mockito.mock(HttpServletResponse.class); FilterChain chain=Mockito.mock(FilterChain.class); final HashMap cookieMap=new HashMap(); Mockito.doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocation) throws Throwable { Object[] args=invocation.getArguments(); parseCookieMap((String)args[1],cookieMap); return null; } } ).when(response).addHeader(Mockito.eq("Set-Cookie"),Mockito.anyString()); Mockito.doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocation) throws Throwable { Assert.fail("shouldn't get here"); return null; } } ).when(chain).doFilter(Mockito.anyObject(),Mockito.anyObject()); filter.doFilter(request,response,chain); Mockito.verify(response).sendError(HttpServletResponse.SC_FORBIDDEN,"AUTH FAILED"); 
Mockito.verify(response,Mockito.never()).setHeader(Mockito.eq("WWW-Authenticate"),Mockito.anyString()); String value=cookieMap.get(AuthenticatedURL.AUTH_COOKIE); Assert.assertNotNull("cookie missing",value); Assert.assertEquals("",value); } finally { filter.destroy(); } }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A correctly signed cookie whose token type ("invalidtype") does not
     * match the configured handler's type must make getToken() throw an
     * AuthenticationException with message "Invalid AuthenticationToken type".
     * The failed flag + finally-assert pattern guarantees the test fails if
     * the exception is NOT thrown, without masking other exceptions.
     */
    @Test public void testGetTokenInvalidType() throws Exception { AuthenticationFilter filter=new AuthenticationFilter(); try { FilterConfig config=Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter("management.operation.return")).thenReturn("true"); Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(DummyAuthenticationHandler.class.getName()); Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,AuthenticationFilter.SIGNATURE_SECRET,"management.operation.return")).elements()); ServletContext context=Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null); Mockito.when(config.getServletContext()).thenReturn(context); filter.init(config); AuthenticationToken token=new AuthenticationToken("u","p","invalidtype"); token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC); Signer signer=new Signer(new StringSignerSecretProvider("secret")); String tokenSigned=signer.sign(token.toString()); Cookie cookie=new Cookie(AuthenticatedURL.AUTH_COOKIE,tokenSigned); HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie}); boolean failed=false; try { filter.getToken(request); } catch ( AuthenticationException ex) { Assert.assertEquals("Invalid AuthenticationToken type",ex.getMessage()); failed=true; } finally { Assert.assertTrue("token not invalid type",failed); } } finally { filter.destroy(); } }

    Class: org.apache.hadoop.security.authentication.server.TestAuthenticationToken

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Basic getter coverage for AuthenticationToken, plus expiry behavior:
     * the token is live before its expiry instant and expired after it.
     */
    @Test
    public void testGetters() throws Exception {
        final long expires = System.currentTimeMillis() + 50;
        AuthenticationToken token = new AuthenticationToken("u", "p", "t");
        token.setExpires(expires);

        Assert.assertEquals("u", token.getUserName());
        Assert.assertEquals("p", token.getName());
        Assert.assertEquals("t", token.getType());
        Assert.assertEquals(expires, token.getExpires());
        Assert.assertFalse(token.isExpired());

        // Sleep past the 50 ms expiry window, then the token must be expired.
        Thread.sleep(70);
        Assert.assertTrue(token.isExpired());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Serializing a token with toString() and re-parsing it must preserve
     * name, type, and expiry, and the parsed token must still honor expiry.
     */
    @Test
    public void testToStringAndParse() throws Exception {
        final long expires = System.currentTimeMillis() + 50;
        AuthenticationToken original = new AuthenticationToken("u", "p", "t");
        original.setExpires(expires);

        // Round trip through the string form.
        AuthenticationToken token = AuthenticationToken.parse(original.toString());
        Assert.assertEquals("p", token.getName());
        Assert.assertEquals("t", token.getType());
        Assert.assertEquals(expires, token.getExpires());
        Assert.assertFalse(token.isExpired());

        // After the 50 ms window elapses the parsed token is expired too.
        Thread.sleep(70);
        Assert.assertTrue(token.isExpired());
    }

    BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The ANONYMOUS singleton carries no identity: all name/type fields are
     * null, expiry is -1, and it never reports itself as expired.
     */
    @Test
    public void testAnonymous() {
        AuthenticationToken anon = AuthenticationToken.ANONYMOUS;
        Assert.assertNotNull(anon);
        Assert.assertEquals(null, anon.getUserName());
        Assert.assertEquals(null, anon.getName());
        Assert.assertEquals(null, anon.getType());
        Assert.assertEquals(-1, anon.getExpires());
        Assert.assertFalse(anon.isExpired());
    }

    Class: org.apache.hadoop.security.authentication.server.TestKerberosAuthenticationHandler

    EqualityVerifier 
    /**
     * The handler must report the type expected by this test subclass.
     */
    @Test(timeout=60000)
    public void testType() throws Exception {
        String actualType = handler.getType();
        Assert.assertEquals(getExpectedType(), actualType);
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that the NAME_RULES property passed to the handler's init()
     * replaces the globally installed KerberosName rules: after init, a
     * principal in realm BAR maps to its short name while a FOO principal
     * (only covered by the pre-init rules) must fail getShortName().
     * NOTE(review): both empty catch blocks are deliberate — handler.init may
     * fail for unrelated kerberos-environment reasons, and the final
     * getShortName() is EXPECTED to throw; do not add handling there.
     * KerberosName rules are global state, hence the explicit setRules and
     * handler re-creation at the top.
     */
    @Test(timeout=60000) public void testNameRules() throws Exception { KerberosName kn=new KerberosName(KerberosTestUtils.getServerPrincipal()); Assert.assertEquals(KerberosTestUtils.getRealm(),kn.getRealm()); handler.destroy(); KerberosName.setRules("RULE:[1:$1@$0](.*@FOO)s/@.*//\nDEFAULT"); handler=getNewAuthenticationHandler(); Properties props=getDefaultProperties(); props.setProperty(KerberosAuthenticationHandler.NAME_RULES,"RULE:[1:$1@$0](.*@BAR)s/@.*//\nDEFAULT"); try { handler.init(props); } catch ( Exception ex) { } kn=new KerberosName("bar@BAR"); Assert.assertEquals("bar",kn.getShortName()); kn=new KerberosName("bar@FOO"); try { kn.getShortName(); Assert.fail(); } catch ( Exception ex) { } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * With PRINCIPAL set to the "*" wildcard, the handler must discover login
     * principals from the keytab itself. Four principals are provisioned in
     * the test KDC; after init, exactly those starting with "HTTP/" must be
     * present in getPrincipals() (HTTP2/ and XHTTP/ entries must be excluded),
     * checked per-user via the expected flag in the loop.
     */
    @Test(timeout=60000) public void testDynamicPrincipalDiscovery() throws Exception { String[] keytabUsers=new String[]{"HTTP/host1","HTTP/host2","HTTP2/host1","XHTTP/host"}; String keytab=KerberosTestUtils.getKeytabFile(); getKdc().createPrincipal(new File(keytab),keytabUsers); handler.destroy(); Properties props=new Properties(); props.setProperty(KerberosAuthenticationHandler.KEYTAB,keytab); props.setProperty(KerberosAuthenticationHandler.PRINCIPAL,"*"); handler=getNewAuthenticationHandler(); handler.init(props); Assert.assertEquals(KerberosTestUtils.getKeytabFile(),handler.getKeytab()); Set loginPrincipals=handler.getPrincipals(); for ( String user : keytabUsers) { Principal principal=new KerberosPrincipal(user + "@" + KerberosTestUtils.getRealm()); boolean expected=user.startsWith("HTTP/"); Assert.assertEquals("checking for " + user,expected,loginPrincipals.contains(principal)); } }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Wildcard principal discovery against a keytab that contains no HTTP/*
     * entries (only hdfs/localhost) must make init() fail with a
     * ServletException whose cause message is exactly
     * "Principals do not exist in the keytab". Any other outcome — success or
     * a different throwable — fails the test explicitly.
     */
    @Test(timeout=60000) public void testDynamicPrincipalDiscoveryMissingPrincipals() throws Exception { String[] keytabUsers=new String[]{"hdfs/localhost"}; String keytab=KerberosTestUtils.getKeytabFile(); getKdc().createPrincipal(new File(keytab),keytabUsers); handler.destroy(); Properties props=new Properties(); props.setProperty(KerberosAuthenticationHandler.KEYTAB,keytab); props.setProperty(KerberosAuthenticationHandler.PRINCIPAL,"*"); handler=getNewAuthenticationHandler(); try { handler.init(props); Assert.fail("init should have failed"); } catch ( ServletException ex) { Assert.assertEquals("Principals do not exist in the keytab",ex.getCause().getMessage()); } catch ( Throwable t) { Assert.fail("wrong exception: " + t); } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * After fixture setup, the handler must expose the configured keytab and
     * exactly one login principal: the server principal.
     */
    @Test(timeout=60000)
    public void testInit() throws Exception {
        Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab());
        Principal expectedPrincipal =
            new KerberosPrincipal(KerberosTestUtils.getServerPrincipal());
        Set principals = handler.getPrincipals();
        Assert.assertTrue(principals.contains(expectedPrincipal));
        Assert.assertEquals(1, principals.size());
    }

    Class: org.apache.hadoop.security.authentication.server.TestPseudoAuthenticationHandler

    InternalCallVerifier EqualityVerifier 
    /**
     * ANONYMOUS_ALLOWED=false must be reflected by getAcceptAnonymous().
     */
    @Test
    public void testInit() throws Exception {
        PseudoAuthenticationHandler pseudoHandler = new PseudoAuthenticationHandler();
        try {
            Properties props = new Properties();
            props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
            pseudoHandler.init(props);
            Assert.assertEquals(false, pseudoHandler.getAcceptAnonymous());
        } finally {
            pseudoHandler.destroy();
        }
    }

    EqualityVerifier 
    /**
     * A fresh handler must report PseudoAuthenticationHandler.TYPE.
     */
    @Test
    public void testType() throws Exception {
        PseudoAuthenticationHandler pseudoHandler = new PseudoAuthenticationHandler();
        Assert.assertEquals(PseudoAuthenticationHandler.TYPE, pseudoHandler.getType());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * With anonymous access enabled, authenticating a request that carries no
     * credentials must yield the ANONYMOUS token.
     */
    @Test
    public void testAnonymousOn() throws Exception {
        PseudoAuthenticationHandler pseudoHandler = new PseudoAuthenticationHandler();
        try {
            Properties props = new Properties();
            props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
            pseudoHandler.init(props);

            HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
            HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
            AuthenticationToken token = pseudoHandler.authenticate(request, response);
            Assert.assertEquals(AuthenticationToken.ANONYMOUS, token);
        } finally {
            pseudoHandler.destroy();
        }
    }

    Class: org.apache.hadoop.security.authentication.util.TestKerberosUtil

    APIUtilityVerifier BranchVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * getPrincipalNames(keytab, pattern) must return exactly the test
     * principals matching the HTTP/.* pattern — each matching principal is
     * present, and the result contains nothing else (size check).
     */
    @Test
    public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
        createKeyTab(testKeytab, testPrincipals);
        Pattern httpPattern = Pattern.compile("HTTP/.*");
        String[] httpPrincipals =
            KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
        Assert.assertNotNull("principals cannot be null", httpPrincipals);

        List found = Arrays.asList(httpPrincipals);
        int matchCount = 0;
        for (String candidate : testPrincipals) {
            if (!httpPattern.matcher(candidate).matches()) {
                continue; // non-HTTP principals must not appear
            }
            Assert.assertTrue("missing principal " + candidate,
                found.contains(candidate));
            matchCount++;
        }
        Assert.assertEquals(matchCount, httpPrincipals.length);
    }

    APIUtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Without a filter pattern, getPrincipalNames(keytab) must return every
     * provisioned test principal and nothing more.
     */
    @Test
    public void testGetPrincipalNamesFromKeytab() throws IOException {
        createKeyTab(testKeytab, testPrincipals);
        String[] principals = KerberosUtil.getPrincipalNames(testKeytab);
        Assert.assertNotNull("principals cannot be null", principals);

        List found = Arrays.asList(principals);
        int count = 0;
        for (String expected : testPrincipals) {
            Assert.assertTrue("missing principal " + expected,
                found.contains(expected));
            count++;
        }
        Assert.assertEquals(count, principals.length);
    }

    EqualityVerifier 
    /**
     * getServicePrincipal() must fall back to the local hostname when the
     * supplied host is null, empty, or the "0.0.0.0" wildcard, and must
     * lower-case the host part of the resulting "service/host" string.
     * NOTE(review): expectations use default-locale toLowerCase(); verify
     * against KerberosUtil's own case handling if builds may run under
     * locales with non-standard casing (e.g. Turkish).
     */
    @Test
    public void testGetServerPrincipal() throws IOException {
        final String service = "TestKerberosUtil";
        final String localHost = KerberosUtil.getLocalHostName();
        final String testHost = "FooBar";

        // null / "" / "0.0.0.0" all resolve to the local hostname.
        Assert.assertEquals("When no hostname is sent",
            service + "/" + localHost.toLowerCase(),
            KerberosUtil.getServicePrincipal(service, null));
        Assert.assertEquals("When empty hostname is sent",
            service + "/" + localHost.toLowerCase(),
            KerberosUtil.getServicePrincipal(service, ""));
        Assert.assertEquals("When 0.0.0.0 hostname is sent",
            service + "/" + localHost.toLowerCase(),
            KerberosUtil.getServicePrincipal(service, "0.0.0.0"));

        // Explicit hostnames are lower-cased regardless of input case.
        Assert.assertEquals("When uppercase hostname is sent",
            service + "/" + testHost.toLowerCase(),
            KerberosUtil.getServicePrincipal(service, testHost));
        Assert.assertEquals("When lowercase hostname is sent",
            service + "/" + testHost.toLowerCase(),
            KerberosUtil.getServicePrincipal(service, testHost.toLowerCase()));
    }

    Class: org.apache.hadoop.security.authentication.util.TestRandomSignerSecretProvider

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies secret rollover for RandomSignerSecretProvider. The provider
     * is seeded with the same value as the local Random, so secret1..3 predict
     * the provider's generated secrets in lockstep — the rand.nextLong() call
     * order must therefore match the provider's internal generation order
     * exactly. Across three rollover windows (sleep = frequency + 2s slack):
     * the current secret advances secret1 -> secret2 -> secret3, getAllSecrets
     * always has length 2 ([current, previous]), and the previous slot is null
     * only before the first rollover. destroy() in finally stops the rollover
     * scheduler. NOTE(review): timing-sensitive by design; the 2s slack is the
     * only guard against a slow scheduler.
     */
    @Test public void testGetAndRollSecrets() throws Exception { long rolloverFrequency=15 * 1000; long seed=System.currentTimeMillis(); Random rand=new Random(seed); byte[] secret1=Long.toString(rand.nextLong()).getBytes(); byte[] secret2=Long.toString(rand.nextLong()).getBytes(); byte[] secret3=Long.toString(rand.nextLong()).getBytes(); RandomSignerSecretProvider secretProvider=new RandomSignerSecretProvider(seed); try { secretProvider.init(null,rolloverFrequency); byte[] currentSecret=secretProvider.getCurrentSecret(); byte[][] allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret1,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret1,allSecrets[0]); Assert.assertNull(allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret2,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret2,allSecrets[0]); Assert.assertArrayEquals(secret1,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret3,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret3,allSecrets[0]); Assert.assertArrayEquals(secret2,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); } finally { secretProvider.destroy(); } }

    Class: org.apache.hadoop.security.authentication.util.TestRolloverSignerSecretProvider

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Same rollover contract as the RandomSignerSecretProvider test, but using
     * the TRolloverSignerSecretProvider test double primed with a fixed
     * sequence (secret1..3), so the expected values are deterministic rather
     * than seed-derived. Checks after each window (sleep = frequency + 2s
     * slack): current secret advances through the sequence, getAllSecrets is
     * always [current, previous] with length 2, and previous is null only
     * before the first rollover. destroy() in finally stops the scheduler.
     */
    @Test public void testGetAndRollSecrets() throws Exception { long rolloverFrequency=15 * 1000; byte[] secret1="doctor".getBytes(); byte[] secret2="who".getBytes(); byte[] secret3="tardis".getBytes(); TRolloverSignerSecretProvider secretProvider=new TRolloverSignerSecretProvider(new byte[][]{secret1,secret2,secret3}); try { secretProvider.init(null,rolloverFrequency); byte[] currentSecret=secretProvider.getCurrentSecret(); byte[][] allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret1,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret1,allSecrets[0]); Assert.assertNull(allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret2,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret2,allSecrets[0]); Assert.assertArrayEquals(secret1,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); currentSecret=secretProvider.getCurrentSecret(); allSecrets=secretProvider.getAllSecrets(); Assert.assertArrayEquals(secret3,currentSecret); Assert.assertEquals(2,allSecrets.length); Assert.assertArrayEquals(secret3,allSecrets[0]); Assert.assertArrayEquals(secret2,allSecrets[1]); Thread.sleep(rolloverFrequency + 2000); } finally { secretProvider.destroy(); } }

    Class: org.apache.hadoop.security.authentication.util.TestSigner

    InternalCallVerifier EqualityVerifier 
    /**
     * Signing is deterministic for equal inputs and distinguishes different
     * inputs: sign("ok") twice yields identical strings, sign("wrong") does not.
     */
    @Test
    public void testSignature() throws Exception {
        Signer signer = new Signer(new StringSignerSecretProvider("secret"));
        String first = signer.sign("ok");
        String second = signer.sign("ok");
        String other = signer.sign("wrong");
        Assert.assertEquals(first, second);
        Assert.assertNotEquals(first, other);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Drives the Signer through a secret-rotation lifecycle using a mutable
     * TestSignerSecretProvider:
     *  - current=B: sign/verify round-trips (s1);
     *  - previous=A added: signatures are unchanged (s1 == s2), signing uses
     *    the current secret only;
     *  - rotate to current=C, previous=B: new signatures differ (s1 != s3)
     *    but s1 still verifies because B remains the previous secret;
     *  - rotate to current=D, previous=C: B has aged out, so verifying s1 must
     *    throw SignerException (the empty catch IS the expected path; reaching
     *    Assert.fail() means verification wrongly succeeded).
     * The setCurrent/setPrevious call order encodes the rotation protocol —
     * do not reorder.
     */
    @Test public void testMultipleSecrets() throws Exception { TestSignerSecretProvider secretProvider=new TestSignerSecretProvider(); Signer signer=new Signer(secretProvider); secretProvider.setCurrentSecret("secretB"); String t1="test"; String s1=signer.sign(t1); String e1=signer.verifyAndExtract(s1); Assert.assertEquals(t1,e1); secretProvider.setPreviousSecret("secretA"); String t2="test"; String s2=signer.sign(t2); String e2=signer.verifyAndExtract(s2); Assert.assertEquals(t2,e2); Assert.assertEquals(s1,s2); secretProvider.setCurrentSecret("secretC"); secretProvider.setPreviousSecret("secretB"); String t3="test"; String s3=signer.sign(t3); String e3=signer.verifyAndExtract(s3); Assert.assertEquals(t3,e3); Assert.assertNotEquals(s1,s3); String e1b=signer.verifyAndExtract(s1); Assert.assertEquals(t1,e1b); secretProvider.setCurrentSecret("secretD"); secretProvider.setPreviousSecret("secretC"); try { signer.verifyAndExtract(s1); Assert.fail(); } catch ( SignerException ex) { } }

    InternalCallVerifier EqualityVerifier 
    /**
     * sign() followed by verifyAndExtract() must round-trip the original text.
     */
    @Test
    public void testVerify() throws Exception {
        Signer signer = new Signer(new StringSignerSecretProvider("secret"));
        final String original = "test";
        String signed = signer.sign(original);
        String extracted = signer.verifyAndExtract(signed);
        Assert.assertEquals(original, extracted);
    }

    Class: org.apache.hadoop.security.authentication.util.TestStringSignerSecretProvider

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A StringSignerSecretProvider exposes exactly one secret: the UTF bytes
     * of the configured string, both as current secret and as the sole entry
     * of getAllSecrets().
     */
    @Test
    public void testGetSecrets() throws Exception {
        final String secretStr = "secret";
        StringSignerSecretProvider provider =
            new StringSignerSecretProvider(secretStr);
        provider.init(null, -1);

        byte[] expectedBytes = secretStr.getBytes();
        Assert.assertArrayEquals(expectedBytes, provider.getCurrentSecret());
        byte[][] all = provider.getAllSecrets();
        Assert.assertEquals(1, all.length);
        Assert.assertArrayEquals(expectedBytes, all[0]);
    }

    Class: org.apache.hadoop.security.authorize.TestAccessControlList

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies AccessControlList parsing of "users groups" strings: a single
     * user+group pair, a user with no group (with and without trailing space),
     * a group with no user, and comma-separated multi-entry lists (iteration
     * order checked).
     *
     * Fix: JUnit's assertEquals contract is (expected, actual); the original
     * passed the arguments reversed throughout, which produces misleading
     * "expected X but was Y" failure messages. Pass/fail behavior unchanged.
     */
    @Test
    public void testAccessControlList() throws Exception {
        AccessControlList acl;
        Collection users;
        Collection groups;

        // One user, one group.
        acl = new AccessControlList("drwho tardis");
        users = acl.getUsers();
        assertEquals(1, users.size());
        assertEquals("drwho", users.iterator().next());
        groups = acl.getGroups();
        assertEquals(1, groups.size());
        assertEquals("tardis", groups.iterator().next());

        // User only, no group part.
        acl = new AccessControlList("drwho");
        users = acl.getUsers();
        assertEquals(1, users.size());
        assertEquals("drwho", users.iterator().next());
        groups = acl.getGroups();
        assertEquals(0, groups.size());

        // User only, trailing space.
        acl = new AccessControlList("drwho ");
        users = acl.getUsers();
        assertEquals(1, users.size());
        assertEquals("drwho", users.iterator().next());
        groups = acl.getGroups();
        assertEquals(0, groups.size());

        // Group only, leading space.
        acl = new AccessControlList(" tardis");
        users = acl.getUsers();
        assertEquals(0, users.size());
        groups = acl.getGroups();
        assertEquals(1, groups.size());
        assertEquals("tardis", groups.iterator().next());

        // Comma-separated users and groups (note space after the comma in the
        // group list is part of the parse being tested).
        Iterator iter;
        acl = new AccessControlList("drwho,joe tardis, users");
        users = acl.getUsers();
        assertEquals(2, users.size());
        iter = users.iterator();
        assertEquals("drwho", iter.next());
        assertEquals("joe", iter.next());
        groups = acl.getGroups();
        assertEquals(2, groups.size());
        iter = groups.iterator();
        assertEquals("tardis", iter.next());
        assertEquals("users", iter.next());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Test addUser/Group and removeUser/Group api.
     *
     * Fixes: (1) assertEquals arguments were reversed (JUnit expects
     * (expected, actual)); (2) after removeUser("drwho") the test asserted on
     * the stale {@code users} reference fetched before the removal — it now
     * re-fetches via getUsers() so the assertion does not depend on getUsers()
     * returning a live view of the internal collection.
     */
    @Test
    public void testAddRemoveAPI() {
        AccessControlList acl;
        Collection users;
        Collection groups;

        // Empty ACL.
        acl = new AccessControlList(" ");
        assertEquals(0, acl.getUsers().size());
        assertEquals(0, acl.getGroups().size());
        assertEquals(" ", acl.getAclString());

        // Add a user.
        acl.addUser("drwho");
        users = acl.getUsers();
        assertEquals(1, users.size());
        assertEquals("drwho", users.iterator().next());
        assertEquals("drwho ", acl.getAclString());

        // Add a group.
        acl.addGroup("tardis");
        groups = acl.getGroups();
        assertEquals(1, groups.size());
        assertEquals("tardis", groups.iterator().next());
        assertEquals("drwho tardis", acl.getAclString());

        // Add a second user and group; iteration order must be stable.
        acl.addUser("joe");
        acl.addGroup("users");
        users = acl.getUsers();
        assertEquals(2, users.size());
        Iterator iter = users.iterator();
        assertEquals("drwho", iter.next());
        assertEquals("joe", iter.next());
        groups = acl.getGroups();
        assertEquals(2, groups.size());
        iter = groups.iterator();
        assertEquals("tardis", iter.next());
        assertEquals("users", iter.next());
        assertEquals("drwho,joe tardis,users", acl.getAclString());

        // Remove the second user and group.
        acl.removeUser("joe");
        acl.removeGroup("users");
        users = acl.getUsers();
        assertEquals(1, users.size());
        assertFalse(users.contains("joe"));
        groups = acl.getGroups();
        assertEquals(1, groups.size());
        assertFalse(groups.contains("users"));
        assertEquals("drwho tardis", acl.getAclString());

        // Remove the last group.
        acl.removeGroup("tardis");
        groups = acl.getGroups();
        assertEquals(0, groups.size());
        assertFalse(groups.contains("tardis"));
        assertEquals("drwho ", acl.getAclString());

        // Remove the last user — re-fetch before asserting.
        acl.removeUser("drwho");
        users = acl.getUsers();
        assertEquals(0, users.size());
        assertFalse(users.contains("drwho"));
        assertEquals(0, acl.getGroups().size());
        assertEquals(0, acl.getUsers().size());
        assertEquals(" ", acl.getAclString());
    }

    Class: org.apache.hadoop.security.authorize.TestProxyUsers

    APIUtilityVerifier EqualityVerifier 
    /**
     * Proxy-user configuration values containing stray spaces must still be
     * parsed, and the proxy group list must retain all configured groups.
     */
    @Test
    public void testWithProxyGroupsAndUsersWithSpaces() throws Exception {
        Configuration conf = new Configuration();
        // Note the deliberate trailing space on the first proxy user.
        conf.set(
            DefaultImpersonationProvider.getTestProvider()
                .getProxySuperuserUserConfKey(REAL_USER_NAME),
            StringUtils.join(",", Arrays.asList(
                PROXY_USER_NAME + " ", AUTHORIZED_PROXY_USER_NAME, "ONEMORE")));
        conf.set(
            DefaultImpersonationProvider.getTestProvider()
                .getProxySuperuserGroupConfKey(REAL_USER_NAME),
            StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
        conf.set(
            DefaultImpersonationProvider.getTestProvider()
                .getProxySuperuserIpConfKey(REAL_USER_NAME),
            PROXY_IP);
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

        Collection proxiedGroups =
            ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
                DefaultImpersonationProvider.getTestProvider()
                    .getProxySuperuserGroupConfKey(REAL_USER_NAME));
        assertEquals(GROUP_NAMES.length, proxiedGroups.size());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * Duplicate proxy host entries in the configuration must be collapsed to
     * a single entry after a refresh.
     */
    @Test
    public void testWithDuplicateProxyHosts() throws Exception {
        Configuration conf = new Configuration();
        conf.set(
            DefaultImpersonationProvider.getTestProvider()
                .getProxySuperuserGroupConfKey(REAL_USER_NAME),
            StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
        // The same IP listed twice.
        conf.set(
            DefaultImpersonationProvider.getTestProvider()
                .getProxySuperuserIpConfKey(REAL_USER_NAME),
            StringUtils.join(",", Arrays.asList(PROXY_IP, PROXY_IP)));
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

        Collection proxyHosts =
            ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get(
                DefaultImpersonationProvider.getTestProvider()
                    .getProxySuperuserIpConfKey(REAL_USER_NAME));
        assertEquals(1, proxyHosts.size());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * Duplicate proxy-group entries must be de-duplicated: joining
     * GROUP_NAMES with itself still yields a single proxied-group entry for
     * the real user after the configuration refresh.
     */
    @Test
    public void testWithDuplicateProxyGroups() throws Exception {
      final Configuration conf = new Configuration();
      final DefaultImpersonationProvider provider =
          DefaultImpersonationProvider.getTestProvider();
      // The group list is deliberately joined with itself.
      conf.set(provider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
          StringUtils.join(",", Arrays.asList(GROUP_NAMES, GROUP_NAMES)));
      conf.set(provider.getProxySuperuserIpConfKey(REAL_USER_NAME), PROXY_IP);
      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
      Collection groupsToBeProxied =
          ProxyUsers.getDefaultImpersonationProvider().getProxyGroups()
              .get(provider.getProxySuperuserGroupConfKey(REAL_USER_NAME));
      assertEquals(1, groupsToBeProxied.size());
    }

    Class: org.apache.hadoop.security.authorize.TestServiceAuthorization

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies per-protocol ACL resolution against the default ACL:
     * a protocol with an explicit ACL keeps it, a protocol without one falls
     * back to the wildcard ACL, and after the default-ACL key is set and the
     * manager refreshed, the unconfigured protocol picks up the new default
     * while the explicit ACL is untouched.
     */
    @Test public void testDefaultAcl(){ ServiceAuthorizationManager serviceAuthorizationManager=new ServiceAuthorizationManager(); Configuration conf=new Configuration(); conf.set(ACL_CONFIG,"user1 group1"); serviceAuthorizationManager.refresh(conf,new TestPolicyProvider()); AccessControlList acl=serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class); assertEquals("user1 group1",acl.getAclString()); acl=serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class); assertEquals(AccessControlList.WILDCARD_ACL_VALUE,acl.getAclString()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,"user2 group2"); serviceAuthorizationManager.refresh(conf,new TestPolicyProvider()); acl=serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class); assertEquals("user1 group1",acl.getAclString()); acl=serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class); assertEquals("user2 group2",acl.getAclString()); }

    Class: org.apache.hadoop.security.ssl.TestReloadingX509TrustManager

    InternalCallVerifier EqualityVerifier 
    /**
     * Deleting the truststore file out from under the reloading trust
     * manager must NOT drop the previously loaded certificates: after
     * sleeping past the reload interval, the same single issuer is still
     * accepted. Timing-based (sleeps reloadInterval + 200 ms), so the exact
     * call order here is deliberate.
     */
    @Test public void testReloadMissingTrustStore() throws Exception { KeyPair kp=generateKeyPair("RSA"); cert1=generateCertificate("CN=Cert1",kp,30,"SHA1withRSA"); cert2=generateCertificate("CN=Cert2",kp,30,"SHA1withRSA"); String truststoreLocation=BASEDIR + "/testmissing.jks"; createTrustStore(truststoreLocation,"password","cert1",cert1); ReloadingX509TrustManager tm=new ReloadingX509TrustManager("jks",truststoreLocation,"password",10); try { tm.init(); assertEquals(1,tm.getAcceptedIssuers().length); X509Certificate cert=tm.getAcceptedIssuers()[0]; new File(truststoreLocation).delete(); Thread.sleep((tm.getReloadInterval() + 200)); assertEquals(1,tm.getAcceptedIssuers().length); assertEquals(cert,tm.getAcceptedIssuers()[0]); } finally { tm.destroy(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Rewriting the truststore with an additional certificate must be picked
     * up automatically: after sleeping past the reload interval the trust
     * manager accepts two issuers instead of one. Also checks that the
     * configured 10 ms reload interval is reported unchanged. Timing-based;
     * the sleeps bracket the store rewrite on purpose.
     */
    @Test public void testReload() throws Exception { KeyPair kp=generateKeyPair("RSA"); cert1=generateCertificate("CN=Cert1",kp,30,"SHA1withRSA"); cert2=generateCertificate("CN=Cert2",kp,30,"SHA1withRSA"); String truststoreLocation=BASEDIR + "/testreload.jks"; createTrustStore(truststoreLocation,"password","cert1",cert1); ReloadingX509TrustManager tm=new ReloadingX509TrustManager("jks",truststoreLocation,"password",10); try { tm.init(); assertEquals(1,tm.getAcceptedIssuers().length); Thread.sleep((tm.getReloadInterval() + 1000)); Map certs=new HashMap(); certs.put("cert1",cert1); certs.put("cert2",cert2); createTrustStore(truststoreLocation,"password",certs); assertEquals(10,tm.getReloadInterval()); Thread.sleep((tm.getReloadInterval() + 200)); assertEquals(2,tm.getAcceptedIssuers().length); } finally { tm.destroy(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Corrupting the truststore (overwriting it with a single byte) must
     * leave the previously loaded certificate in place: the reload attempt
     * fails quietly and the old issuer is still accepted. The file's
     * lastModified is pushed back one second so the manager sees the file as
     * changed and actually attempts the reload.
     */
    @Test public void testReloadCorruptTrustStore() throws Exception { KeyPair kp=generateKeyPair("RSA"); cert1=generateCertificate("CN=Cert1",kp,30,"SHA1withRSA"); cert2=generateCertificate("CN=Cert2",kp,30,"SHA1withRSA"); String truststoreLocation=BASEDIR + "/testcorrupt.jks"; createTrustStore(truststoreLocation,"password","cert1",cert1); ReloadingX509TrustManager tm=new ReloadingX509TrustManager("jks",truststoreLocation,"password",10); try { tm.init(); assertEquals(1,tm.getAcceptedIssuers().length); X509Certificate cert=tm.getAcceptedIssuers()[0]; OutputStream os=new FileOutputStream(truststoreLocation); os.write(1); os.close(); new File(truststoreLocation).setLastModified(System.currentTimeMillis() - 1000); Thread.sleep((tm.getReloadInterval() + 200)); assertEquals(1,tm.getAcceptedIssuers().length); assertEquals(cert,tm.getAcceptedIssuers()[0]); } finally { tm.destroy(); } }

    Class: org.apache.hadoop.security.ssl.TestSSLFactory

    IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * An HttpsURLConnection must get the configured hostname verifier only
     * after SSLFactory.configure() is applied: before configuration the
     * verifier is not STRICT_IE6, afterwards it is.
     *
     * NOTE(review): assertNotSame compares object identity, not equality —
     * it passes for any non-interned string, so the "before" check is weak.
     * assertNotEquals would express the intent; confirm before changing.
     */
    @Test public void testConnectionConfigurator() throws Exception { Configuration conf=createConfiguration(false,true); conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY,"STRICT_IE6"); SSLFactory sslFactory=new SSLFactory(SSLFactory.Mode.CLIENT,conf); try { sslFactory.init(); HttpsURLConnection sslConn=(HttpsURLConnection)new URL("https://foo").openConnection(); Assert.assertNotSame("STRICT_IE6",sslConn.getHostnameVerifier().toString()); sslFactory.configure(sslConn); Assert.assertEquals("STRICT_IE6",sslConn.getHostnameVerifier().toString()); } finally { sslFactory.destroy(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Every supported hostname-verifier name must round-trip through
     * SSLFactory: with the key unset the factory reports "DEFAULT", and each
     * explicitly configured name is reported back by the verifier's
     * toString(). The factory is re-created, initialized, checked and
     * destroyed for each case, exactly as before.
     */
    @Test
    public void validHostnameVerifier() throws Exception {
      Configuration conf = createConfiguration(false, true);
      // Unset key => the factory falls back to the DEFAULT verifier.
      conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
      SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
      sslFactory.init();
      Assert.assertEquals("DEFAULT",
          sslFactory.getHostnameVerifier().toString());
      sslFactory.destroy();
      // Each named verifier must report its own name.
      String[] verifiers = {
          "ALLOW_ALL", "DEFAULT_AND_LOCALHOST", "STRICT", "STRICT_IE6" };
      for (String verifier : verifiers) {
        conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, verifier);
        sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
        sslFactory.init();
        Assert.assertEquals(verifier,
            sslFactory.getHostnameVerifier().toString());
        sslFactory.destroy();
      }
    }

    Class: org.apache.hadoop.security.token.delegation.TestDelegationToken

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Stress test: 100 daemon threads each mint 100 delegation tokens
     * concurrently (sleeping 250 ms between requests). After all threads
     * join, the token cache must hold exactly 100*100 entries, and for every
     * cached identifier the stored password must match a freshly derived one
     * and verify against the secret manager. Heavily order-dependent and
     * multithreaded — kept byte-identical.
     */
    @Test public void testParallelDelegationTokenCreation() throws Exception { final TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(2000,24 * 60 * 60* 1000,7 * 24 * 60* 60* 1000,2000); try { dtSecretManager.startThreads(); int numThreads=100; final int numTokensPerThread=100; class tokenIssuerThread implements Runnable { @Override public void run(){ for (int i=0; i < numTokensPerThread; i++) { generateDelegationToken(dtSecretManager,"auser","arenewer"); try { Thread.sleep(250); } catch ( Exception e) { } } } } Thread[] issuers=new Thread[numThreads]; for (int i=0; i < numThreads; i++) { issuers[i]=new Daemon(new tokenIssuerThread()); issuers[i].start(); } for (int i=0; i < numThreads; i++) { issuers[i].join(); } Map tokenCache=dtSecretManager.getAllTokens(); Assert.assertEquals(numTokensPerThread * numThreads,tokenCache.size()); Iterator iter=tokenCache.keySet().iterator(); while (iter.hasNext()) { TestDelegationTokenIdentifier id=iter.next(); DelegationTokenInformation info=tokenCache.get(id); Assert.assertTrue(info != null); DelegationKey key=dtSecretManager.getKey(id); Assert.assertTrue(key != null); byte[] storedPassword=dtSecretManager.retrievePassword(id); byte[] password=dtSecretManager.createPassword(id,key); Assert.assertTrue(Arrays.equals(password,storedPassword)); dtSecretManager.verifyToken(id,password); } } finally { dtSecretManager.stopThreads(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * A TestDelegationTokenIdentifier written to a buffer and read back into
     * a fresh identifier must reproduce every field (owner, renewer, real
     * user, issue date, master key id, max date, sequence number) and
     * compare equal to the original.
     */
    @Test
    public void testSerialization() throws Exception {
      TestDelegationTokenIdentifier source = new TestDelegationTokenIdentifier(
          new Text("alice"), new Text("bob"), new Text("colin"));
      source.setIssueDate(123);
      source.setMasterKeyId(321);
      source.setMaxDate(314);
      source.setSequenceNumber(12345);
      // Round-trip through Writable serialization.
      DataOutputBuffer out = new DataOutputBuffer();
      source.write(out);
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), 0, out.getLength());
      TestDelegationTokenIdentifier copy = new TestDelegationTokenIdentifier();
      copy.readFields(in);
      // Field-by-field verification, then full equality.
      assertEquals("alice", copy.getUser().getUserName());
      assertEquals(new Text("bob"), copy.getRenewer());
      assertEquals("colin", copy.getUser().getRealUser().getUserName());
      assertEquals(123, copy.getIssueDate());
      assertEquals(321, copy.getMasterKeyId());
      assertEquals(314, copy.getMaxDate());
      assertEquals(12345, copy.getSequenceNumber());
      assertEquals(source, copy);
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * When the owner and the real user are the same Text, getUser() yields a
     * plain TOKEN-authenticated UGI with no separate real user attached.
     */
    @Test
    public void testGetUserWithOwnerEqualsReal() {
      final Text owner = new Text("owner");
      UserGroupInformation ugi =
          new TestDelegationTokenIdentifier(owner, null, owner).getUser();
      assertNull(ugi.getRealUser());
      assertEquals("owner", ugi.getUserName());
      assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * When owner and real user differ, getUser() yields a PROXY-authenticated
     * UGI whose real user is TOKEN-authenticated, and the chain stops there:
     * the real user has no real user of its own.
     */
    @Test
    public void testGetUserWithOwnerAndReal() {
      final Text owner = new Text("owner");
      final Text realUser = new Text("realUser");
      UserGroupInformation ugi =
          new TestDelegationTokenIdentifier(owner, null, realUser).getUser();
      UserGroupInformation real = ugi.getRealUser();
      assertNotNull(real);
      assertNull(real.getRealUser());
      assertEquals("owner", ugi.getUserName());
      assertEquals("realUser", real.getUserName());
      assertEquals(AuthenticationMethod.PROXY, ugi.getAuthenticationMethod());
      assertEquals(AuthenticationMethod.TOKEN, real.getAuthenticationMethod());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test @SuppressWarnings("unchecked") public void testDelegationTokenSelector() throws Exception { TestDelegationTokenSecretManager dtSecretManager=new TestDelegationTokenSecretManager(24 * 60 * 60* 1000,10 * 1000,1 * 1000,3600000); try { dtSecretManager.startThreads(); AbstractDelegationTokenSelector ds=new AbstractDelegationTokenSelector(KIND); Token token1=generateDelegationToken(dtSecretManager,"SomeUser1","JobTracker"); token1.setService(new Text("MY-SERVICE1")); Token token2=generateDelegationToken(dtSecretManager,"SomeUser2","JobTracker"); token2.setService(new Text("MY-SERVICE2")); List> tokens=new ArrayList>(); tokens.add(token1); tokens.add(token2); Token t=ds.selectToken(new Text("MY-SERVICE1"),tokens); Assert.assertEquals(t,token1); } finally { dtSecretManager.stopThreads(); } }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * An identifier with only an owner (no renewer, no real user) produces a
     * TOKEN-authenticated UGI for that owner with no real user attached.
     */
    @Test
    public void testGetUserWithOwner() {
      UserGroupInformation ugi = new TestDelegationTokenIdentifier(
          new Text("owner"), null, null).getUser();
      assertNull(ugi.getRealUser());
      assertEquals("owner", ugi.getUserName());
      assertEquals(AuthenticationMethod.TOKEN, ugi.getAuthenticationMethod());
    }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * DelegationKey equality contract: keys built from the same id, expiry
     * and bytes are equal AND share a hash code; a key with a different id
     * is not equal.
     */
    @Test
    public void testDelegationKeyEqualAndHash() {
      DelegationKey key1 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
      DelegationKey key2 = new DelegationKey(1111, 2222, "keyBytes".getBytes());
      DelegationKey key3 = new DelegationKey(3333, 2222, "keyBytes".getBytes());
      Assert.assertEquals(key1, key2);
      // Equal objects must have equal hash codes — this is the "AndHash"
      // part of the test, which was previously missing.
      Assert.assertEquals(key1.hashCode(), key2.hashCode());
      Assert.assertFalse(key2.equals(key3));
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Rolling the master key must (a) persist the new key
     * (isStoreNewMasterKeyCalled), (b) grow the key set by at least one, and
     * (c) keep passwords for tokens issued under the old key retrievable
     * unchanged until the token expires. Finally waits for the expired
     * master key to be removed from storage.
     */
    @Test(timeout = 10000)
    public void testRollMasterKey() throws Exception {
      TestDelegationTokenSecretManager dtSecretManager =
          new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
      try {
        dtSecretManager.startThreads();
        Token token =
            generateDelegationToken(dtSecretManager, "SomeUser", "JobTracker");
        byte[] oldPasswd = token.getPassword();
        int prevNumKeys = dtSecretManager.getAllKeys().length;
        dtSecretManager.rollMasterKey();
        Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);
        int currNumKeys = dtSecretManager.getAllKeys().length;
        // assertTrue instead of assertEquals(expr, true): same check,
        // clearer intent.
        Assert.assertTrue((currNumKeys - prevNumKeys) >= 1);
        ByteArrayInputStream bi =
            new ByteArrayInputStream(token.getIdentifier());
        TestDelegationTokenIdentifier identifier =
            dtSecretManager.createIdentifier();
        identifier.readFields(new DataInputStream(bi));
        byte[] newPasswd = dtSecretManager.retrievePassword(identifier);
        // assertArrayEquals: assertEquals on byte[] compares references
        // only, so the previous assertion could not check array contents.
        Assert.assertArrayEquals(oldPasswd, newPasswd);
        // Spin until the expired master key has been removed from storage.
        while (!dtSecretManager.isRemoveStoredMasterKeyCalled) {
          Thread.sleep(200);
        }
      } finally {
        dtSecretManager.stopThreads();
      }
    }

    Class: org.apache.hadoop.security.token.delegation.web.TestWebDelegationToken

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Wires an externally supplied DummyDelegationTokenSecretManager into
     * the authentication filter via the servlet-context attribute, then
     * fetches a delegation token over HTTP and checks it carries the
     * external manager's "fooKind" kind. Jetty and the secret manager are
     * torn down in the finally block.
     */
    @Test public void testExternalDelegationTokenSecretManager() throws Exception { DummyDelegationTokenSecretManager secretMgr=new DummyDelegationTokenSecretManager(); final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(AFilter.class),"/*",0); context.addServlet(new ServletHolder(PingServlet.class),"/bar"); try { secretMgr.startThreads(); context.setAttribute(DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,secretMgr); jetty.start(); URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo"); DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); aUrl.getDelegationToken(authURL,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); Assert.assertEquals(new Text("fooKind"),token.getDelegationToken().getKind()); } finally { jetty.stop(); secretMgr.stopThreads(); } }

    UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the full get/renew/cancel delegation-token call matrix
     * against authenticated and unauthenticated URLs: unauthenticated calls
     * must fail with 401, a foreign renewer with 403, and a cancelled token
     * must be re-obtainable. The exact URL/token sequencing matters, so the
     * body is kept as-is.
     *
     * NOTE(review): the final renewDelegationToken(nonAuthURL,...) try-block
     * has no Assert.fail() after the call, unlike the earlier 401 cases, so
     * it would pass even if no exception were thrown — confirm intent.
     */
    @Test public void testDelegationTokenAuthenticatorCalls() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(AFilter.class),"/*",0); context.addServlet(new ServletHolder(PingServlet.class),"/bar"); try { jetty.start(); URL nonAuthURL=new URL(getJettyURL() + "/foo/bar"); URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo"); URL authURL2=new URL(getJettyURL() + "/foo/bar?authenticated=bar"); DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); try { aUrl.getDelegationToken(nonAuthURL,token,FOO_USER); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("401")); } aUrl.getDelegationToken(authURL,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind()); aUrl.renewDelegationToken(authURL,token); try { aUrl.renewDelegationToken(nonAuthURL,token); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("401")); } aUrl.getDelegationToken(authURL,token,FOO_USER); try { aUrl.renewDelegationToken(authURL2,token); Assert.fail(); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("403")); } aUrl.getDelegationToken(authURL,token,FOO_USER); aUrl.cancelDelegationToken(authURL,token); aUrl.getDelegationToken(authURL,token,FOO_USER); aUrl.cancelDelegationToken(nonAuthURL,token); aUrl.getDelegationToken(authURL,token,FOO_USER); try { aUrl.renewDelegationToken(nonAuthURL,token); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("401")); } } finally { jetty.stop(); } }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Drives the delegation-token REST operations (GETDELEGATIONTOKEN,
     * RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN, and ?delegation=
     * authentication) with raw HttpURLConnection calls, checking status
     * codes: 401 for unauthenticated get/renew, 403 for a foreign renewer,
     * 404 for cancelling an already-cancelled token, and 200 for the valid
     * paths. The token string is parsed out of the JSON response with
     * Jackson. Long, strictly ordered HTTP sequence — kept byte-identical.
     */
    @Test public void testRawHttpCalls() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(AFilter.class),"/*",0); context.addServlet(new ServletHolder(PingServlet.class),"/bar"); try { jetty.start(); URL nonAuthURL=new URL(getJettyURL() + "/foo/bar"); URL authURL=new URL(getJettyURL() + "/foo/bar?authenticated=foo"); HttpURLConnection conn=(HttpURLConnection)nonAuthURL.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); conn=(HttpURLConnection)authURL.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); URL url=new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN"); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo"); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); ObjectMapper mapper=new ObjectMapper(); Map map=mapper.readValue(conn.getInputStream(),Map.class); String dt=(String)((Map)map.get("Token")).get("urlString"); Assert.assertNotNull(dt); url=new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&delegation=" + dt); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(nonAuthURL.toExternalForm() + "?op=RENEWDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&op=RENEWDELEGATIONTOKEN&token=" + dt); 
conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(getJettyURL() + "/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode()); url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); url=new URL(nonAuthURL.toExternalForm() + "?op=CANCELDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,conn.getResponseCode()); url=new URL(authURL.toExternalForm() + "&op=GETDELEGATIONTOKEN&renewer=foo"); conn=(HttpURLConnection)url.openConnection(); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); mapper=new ObjectMapper(); map=mapper.readValue(conn.getInputStream(),Map.class); dt=(String)((Map)map.get("Token")).get("urlString"); Assert.assertNotNull(dt); url=new URL(authURL.toExternalForm() + "&op=CANCELDELEGATIONTOKEN&token=" + dt); conn=(HttpURLConnection)url.openConnection(); conn.setRequestMethod("PUT"); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); } finally { jetty.stop(); } }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * With the pseudo (simple-auth) delegation-token filter in place, a
     * request made inside doAs(FOO_USER) must succeed and echo the user
     * name, and a delegation token with kind "token-kind" must still be
     * obtainable — i.e. the client falls back to pseudo authentication
     * rather than failing without Kerberos.
     */
    @Test public void testFallbackToPseudoDelegationTokenAuthenticator() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0); context.addServlet(new ServletHolder(UserServlet.class),"/bar"); try { jetty.start(); final URL url=new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); HttpURLConnection conn=aUrl.openConnection(url,token); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); List ret=IOUtils.readLines(conn.getInputStream()); Assert.assertEquals(1,ret.size()); Assert.assertEquals(FOO_USER,ret.get(0)); aUrl.getDelegationToken(url,token,FOO_USER); Assert.assertNotNull(token.getDelegationToken()); Assert.assertEquals(new Text("token-kind"),token.getDelegationToken().getKind()); return null; } } ); } finally { jetty.stop(); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Checks the UGI the servlet sees: a plain doAs(FOO_USER) request
     * reports remoteuser==ugi==FOO_USER, while adding a doAs proxy user
     * (OK_USER) reports realugi=FOO_USER with remoteuser/ugi=OK_USER —
     * i.e. proxying is reflected in the server-side UserGroupInformation.
     */
    @Test public void testHttpUGI() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0); context.addServlet(new ServletHolder(UGIServlet.class),"/bar"); try { jetty.start(); final URL url=new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); HttpURLConnection conn=aUrl.openConnection(url,token); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); List ret=IOUtils.readLines(conn.getInputStream()); Assert.assertEquals(1,ret.size()); Assert.assertEquals("remoteuser=" + FOO_USER + ":ugi="+ FOO_USER,ret.get(0)); conn=aUrl.openConnection(url,token,OK_USER); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); ret=IOUtils.readLines(conn.getInputStream()); Assert.assertEquals(1,ret.size()); Assert.assertEquals("realugi=" + FOO_USER + ":remoteuser="+ OK_USER+ ":ugi="+ OK_USER,ret.get(0)); return null; } } ); } finally { jetty.stop(); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Proxy-user authorization: FOO_USER may impersonate OK_USER (200, body
     * echoes OK_USER) but not FAIL_USER (403). Once a delegation token is
     * attached to the current UGI, requests authenticate via the token and
     * the doAs parameter is ignored, so the body echoes FOO_USER again.
     *
     * Note: the {@code ugi} declared inside run() deliberately shadows the
     * enclosing method's {@code ugi} local — legal, but easy to misread.
     */
    @Test public void testProxyUser() throws Exception { final Server jetty=createJettyServer(); Context context=new Context(); context.setContextPath("/foo"); jetty.setHandler(context); context.addFilter(new FilterHolder(PseudoDTAFilter.class),"/*",0); context.addServlet(new ServletHolder(UserServlet.class),"/bar"); try { jetty.start(); final URL url=new URL(getJettyURL() + "/foo/bar"); UserGroupInformation ugi=UserGroupInformation.createRemoteUser(FOO_USER); ugi.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws Exception { DelegationTokenAuthenticatedURL.Token token=new DelegationTokenAuthenticatedURL.Token(); DelegationTokenAuthenticatedURL aUrl=new DelegationTokenAuthenticatedURL(); HttpURLConnection conn=aUrl.openConnection(url,token,OK_USER); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); List ret=IOUtils.readLines(conn.getInputStream()); Assert.assertEquals(1,ret.size()); Assert.assertEquals(OK_USER,ret.get(0)); conn=aUrl.openConnection(url,token,FAIL_USER); Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,conn.getResponseCode()); aUrl.getDelegationToken(url,token,FOO_USER); UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); ugi.addToken(token.getDelegationToken()); token=new DelegationTokenAuthenticatedURL.Token(); conn=aUrl.openConnection(url,token,OK_USER); Assert.assertEquals(HttpURLConnection.HTTP_OK,conn.getResponseCode()); ret=IOUtils.readLines(conn.getInputStream()); Assert.assertEquals(1,ret.size()); Assert.assertEquals(FOO_USER,ret.get(0)); return null; } } ); } finally { jetty.stop(); } }

    Class: org.apache.hadoop.service.TestCompositeService

    IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies CompositeService ordering guarantees: children are init()ed
     * and start()ed in registration order (call sequence number == index)
     * and stop()ed in reverse order. A second stop() must be a no-op — the
     * sequence numbers are unchanged afterwards.
     */
    @Test public void testCallSequence(){ ServiceManager serviceManager=new ServiceManager("ServiceManager"); for (int i=0; i < NUM_OF_SERVICES; i++) { CompositeServiceImpl service=new CompositeServiceImpl(i); serviceManager.addTestService(service); } CompositeServiceImpl[] services=serviceManager.getServices().toArray(new CompositeServiceImpl[0]); assertEquals("Number of registered services ",NUM_OF_SERVICES,services.length); Configuration conf=new Configuration(); serviceManager.init(conf); assertInState(STATE.INITED,services); for (int i=0; i < NUM_OF_SERVICES; i++) { assertEquals("For " + services[i] + " service, init() call sequence number should have been ",i,services[i].getCallSequenceNumber()); } resetServices(services); serviceManager.start(); assertInState(STATE.STARTED,services); for (int i=0; i < NUM_OF_SERVICES; i++) { assertEquals("For " + services[i] + " service, start() call sequence number should have been ",i,services[i].getCallSequenceNumber()); } resetServices(services); serviceManager.stop(); assertInState(STATE.STOPPED,services); for (int i=0; i < NUM_OF_SERVICES; i++) { assertEquals("For " + services[i] + " service, stop() call sequence number should have been ",((NUM_OF_SERVICES - 1) - i),services[i].getCallSequenceNumber()); } serviceManager.stop(); for (int i=0; i < NUM_OF_SERVICES; i++) { assertEquals("For " + services[i] + " service, stop() call sequence number should have been ",((NUM_OF_SERVICES - 1) - i),services[i].getCallSequenceNumber()); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Adding an already-inited sibling to the parent during serviceInit must
     * leave the parent with both children after a full init/start/stop
     * lifecycle.
     */
    @Test(timeout = 1000)
    public void testAddInitedSiblingInInit() throws Throwable {
      CompositeService parent = new CompositeService("parent");
      BreakableService sibling = new BreakableService();
      // The sibling is inited before it is handed to the parent.
      sibling.init(new Configuration());
      parent.addService(new AddSiblingService(parent, sibling, STATE.INITED));
      parent.init(new Configuration());
      parent.start();
      parent.stop();
      assertEquals("Incorrect number of services",
          2, parent.getServices().size());
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A sibling added during init while still NOTINITED makes the parent's
     * start() fail with ServiceStateException; the failed child remains
     * registered, so the parent still reports two services after stop().
     */
    @Test(timeout = 1000)
    public void testAddUninitedSiblingInInit() throws Throwable {
      CompositeService parent = new CompositeService("parent");
      BreakableService sibling = new BreakableService();
      parent.addService(new AddSiblingService(parent, sibling, STATE.INITED));
      parent.init(new Configuration());
      try {
        parent.start();
        fail("Expected an exception, got " + parent);
      } catch (ServiceStateException expected) {
        // expected: an uninited sibling cannot be started
      }
      parent.stop();
      assertEquals("Incorrect number of services",
          2, parent.getServices().size());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A sibling that is already started may be added to the parent during
     * the parent's start phase; the parent then manages both services
     * through stop() and reports two children.
     */
    @Test(timeout = 1000)
    public void testAddStartedSiblingInStart() throws Throwable {
      CompositeService parent = new CompositeService("parent");
      BreakableService sibling = new BreakableService();
      // Bring the sibling fully up before handing it over.
      sibling.init(new Configuration());
      sibling.start();
      parent.addService(new AddSiblingService(parent, sibling, STATE.STARTED));
      parent.init(new Configuration());
      parent.start();
      parent.stop();
      assertEquals("Incorrect number of services",
          2, parent.getServices().size());
    }

    IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Startup-failure handling: the service at FAILED_SERVICE_SEQ_NUMBER is
     * configured to throw on start, so serviceManager.start() must propagate
     * ServiceTestRuntimeException; services before the failure point must
     * end up STOPPED.
     *
     * NOTE(review): the first branch repeatedly asserts the state of
     * services[NUM_OF_SERVICES - 1] rather than services[i]; that may be
     * intentional (only-started-services semantics for the never-started
     * tail) but looks like it was meant to be services[i] — confirm.
     */
    @Test public void testServiceStartup(){ ServiceManager serviceManager=new ServiceManager("ServiceManager"); for (int i=0; i < NUM_OF_SERVICES; i++) { CompositeServiceImpl service=new CompositeServiceImpl(i); if (i == FAILED_SERVICE_SEQ_NUMBER) { service.setThrowExceptionOnStart(true); } serviceManager.addTestService(service); } CompositeServiceImpl[] services=serviceManager.getServices().toArray(new CompositeServiceImpl[0]); Configuration conf=new Configuration(); serviceManager.init(conf); try { serviceManager.start(); fail("Exception should have been thrown due to startup failure of last service"); } catch ( ServiceTestRuntimeException e) { for (int i=0; i < NUM_OF_SERVICES - 1; i++) { if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) { assertEquals("Service state should have been ",STATE.INITED,services[NUM_OF_SERVICES - 1].getServiceState()); } else { assertEquals("Service state should have been ",STATE.STOPPED,services[i].getServiceState()); } } } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * addIfService must reject a non-Service object (an Integer) and accept
     * Service instances; after adding three services and removing one inside
     * serviceInit, the composite must report exactly two children.
     */
    @Test public void testRemoveService(){ CompositeService testService=new CompositeService("TestService"){ @Override public void serviceInit( Configuration conf){ Integer notAService=new Integer(0); assertFalse("Added an integer as a service",addIfService(notAService)); Service service1=new AbstractService("Service1"){ } ; addIfService(service1); Service service2=new AbstractService("Service2"){ } ; addIfService(service2); Service service3=new AbstractService("Service3"){ } ; addIfService(service3); removeService(service1); } } ; testService.init(new Configuration()); assertEquals("Incorrect number of services",2,testService.getServices().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * A started sibling added to the parent during the stop phase is still
     * registered: the parent reports two services after the lifecycle
     * completes.
     */
    @Test(timeout = 1000)
    public void testAddStartedSiblingInStop() throws Throwable {
      CompositeService parent = new CompositeService("parent");
      BreakableService sibling = new BreakableService();
      // Bring the sibling fully up before handing it over.
      sibling.init(new Configuration());
      sibling.start();
      parent.addService(new AddSiblingService(parent, sibling, STATE.STOPPED));
      parent.init(new Configuration());
      parent.start();
      parent.stop();
      assertEquals("Incorrect number of services",
          2, parent.getServices().size());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A started sibling added during init stays STARTED through the parent's
     * own init and start phases, and is driven to STOPPED when the parent
     * stops; the parent reports two services.
     */
    @Test(timeout = 1000)
    public void testAddStartedSiblingInInit() throws Throwable {
      CompositeService parent = new CompositeService("parent");
      BreakableService sibling = new BreakableService();
      sibling.init(new Configuration());
      sibling.start();
      parent.addService(new AddSiblingService(parent, sibling, STATE.INITED));
      parent.init(new Configuration());
      assertInState(STATE.STARTED, sibling);
      parent.start();
      assertInState(STATE.STARTED, sibling);
      parent.stop();
      assertEquals("Incorrect number of services",
          2, parent.getServices().size());
      assertInState(STATE.STOPPED, sibling);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * addIfService contract: a non-Service object (an Integer) is rejected
     * and returns false, a Service is accepted and returns true, and the
     * composite then reports exactly one child.
     */
    @Test(timeout=1000) public void testAddIfService(){ CompositeService testService=new CompositeService("TestService"){ Service service; @Override public void serviceInit( Configuration conf){ Integer notAService=new Integer(0); assertFalse("Added an integer as a service",addIfService(notAService)); service=new AbstractService("Service"){ } ; assertTrue("Unable to add a service",addIfService(service)); } } ; testService.init(new Configuration()); assertEquals("Incorrect number of services",1,testService.getServices().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * A sibling scheduled to be added during the parent's start phase stays
     * NOTINITED through the parent's init, and is still counted among the
     * parent's services after the lifecycle completes.
     */
    @Test(timeout = 1000)
    public void testAddUninitedSiblingInStart() throws Throwable {
      CompositeService parent = new CompositeService("parent");
      BreakableService sibling = new BreakableService();
      parent.addService(new AddSiblingService(parent, sibling, STATE.STARTED));
      parent.init(new Configuration());
      // Nothing has inited the sibling yet at this point.
      assertInState(STATE.NOTINITED, sibling);
      parent.start();
      parent.stop();
      assertEquals("Incorrect number of services",
          2, parent.getServices().size());
    }

    Class: org.apache.hadoop.service.TestGlobalStateChangeListener

    InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * Test that the {@link BreakableStateChangeListener} is picking up the
     * state changes and that its last event field is as expected: one event
     * per lifecycle transition (init/start/stop), the listener's recorded
     * state tracks the service state, and the last service seen is the one
     * driving the transitions.
     */
    @Test public void testEventHistory(){ register(); BreakableService service=new BreakableService(); assertListenerState(listener,Service.STATE.NOTINITED); assertEquals(0,listener.getEventCount()); service.init(new Configuration()); assertListenerState(listener,Service.STATE.INITED); assertSame(service,listener.getLastService()); assertListenerEventCount(listener,1); service.start(); assertListenerState(listener,Service.STATE.STARTED); assertListenerEventCount(listener,2); service.stop(); assertListenerState(listener,Service.STATE.STOPPED); assertListenerEventCount(listener,3); }

    Class: org.apache.hadoop.service.TestServiceLifecycle

    UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Show that if the service failed during an init operation, stop was
     * called: the service lands in STOPPED, and both the recorded failure
     * state and the failure cause reflect the broken INITED transition.
     */
    @Test
    public void testStopFailingInitAndStop() throws Throwable {
      BreakableService svc = new BreakableService(true, false, true);
      svc.registerServiceListener(new LoggingStateChangeListener());
      try {
        svc.init(new Configuration());
        fail("Expected a failure, got " + svc);
      } catch (BreakableService.BrokenLifecycleEvent e) {
        assertEquals(Service.STATE.INITED, e.state);
      }
      assertServiceStateStopped(svc);
      assertEquals(Service.STATE.INITED, svc.getFailureState());
      Throwable thrown = svc.getFailureCause();
      assertNotNull("Null failure cause in " + svc, thrown);
      BreakableService.BrokenLifecycleEvent event =
          (BreakableService.BrokenLifecycleEvent) thrown;
      assertNotNull("null state in " + event + " raised by " + svc, event.state);
      assertEquals(Service.STATE.INITED, event.state);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A listener that throws on the STARTED notification must not break the
     * service lifecycle: subsequent events still arrive and the listener's
     * failure count stays at 1.
     */
    @Test
    public void testServiceFailingNotifications() throws Throwable {
      BreakableService service = new BreakableService(false, false, false);
      BreakableStateChangeListener listener = new BreakableStateChangeListener();
      listener.setFailingState(Service.STATE.STARTED);
      service.registerServiceListener(listener);
      service.init(new Configuration());
      assertEventCount(listener, 1);
      service.start();
      assertEventCount(listener, 2);
      assertEquals(1, listener.getFailureCount());
      service.stop();
      assertEventCount(listener, 3);
      assertEquals(1, listener.getFailureCount());
      // A second stop() should be a no-op with no further notifications.
      service.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * This test verifies that you can block waiting for something to happen
     * and use notifications to manage it: the test waits on the listener's
     * monitor until the async self-terminating service notifies its STOPPED
     * transition, then checks the wait finished well before the timeout.
     * @throws Throwable on a failure
     */
    @Test
    public void testListenerWithNotifications() throws Throwable {
      AsyncSelfTerminatingService svc = new AsyncSelfTerminatingService(2000);
      NotifyingListener notifier = new NotifyingListener();
      svc.registerServiceListener(notifier);
      svc.init(new Configuration());
      svc.start();
      assertServiceInState(svc, Service.STATE.STARTED);
      long begin = System.currentTimeMillis();
      synchronized (notifier) {
        notifier.wait(20000);
      }
      long duration = System.currentTimeMillis() - begin;
      assertEquals(Service.STATE.STOPPED, notifier.notifyingState);
      assertServiceInState(svc, Service.STATE.STOPPED);
      assertTrue("Duration of " + duration + " too long", duration < 10000);
    }

    Class: org.apache.hadoop.streaming.TestAutoInputFormat

    IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises AutoInputFormat against a directory holding both a plain-text
     * file and a sequence file. Splits originating from the text file must
     * decode as (LongWritable offset, Text line) with line values that are
     * multiples of 10; splits from the sequence file must decode as
     * (IntWritable, LongWritable) pairs that are multiples of 11 and 12
     * respectively — matching exactly how the two files were written above.
     * Both writers and every record reader are closed in finally blocks.
     */
    @SuppressWarnings({"unchecked","deprecation"}) @Test public void testFormat() throws IOException { JobConf job=new JobConf(conf); FileSystem fs=FileSystem.getLocal(conf); Path dir=new Path(System.getProperty("test.build.data",".") + "/mapred"); Path txtFile=new Path(dir,"auto.txt"); Path seqFile=new Path(dir,"auto.seq"); fs.delete(dir,true); FileInputFormat.setInputPaths(job,dir); Writer txtWriter=new OutputStreamWriter(fs.create(txtFile)); try { for (int i=0; i < LINES_COUNT; i++) { txtWriter.write("" + (10 * i)); txtWriter.write("\n"); } } finally { txtWriter.close(); } SequenceFile.Writer seqWriter=SequenceFile.createWriter(fs,conf,seqFile,IntWritable.class,LongWritable.class); try { for (int i=0; i < RECORDS_COUNT; i++) { IntWritable key=new IntWritable(11 * i); LongWritable value=new LongWritable(12 * i); seqWriter.append(key,value); } } finally { seqWriter.close(); } AutoInputFormat format=new AutoInputFormat(); InputSplit[] splits=format.getSplits(job,SPLITS_COUNT); for ( InputSplit split : splits) { RecordReader reader=format.getRecordReader(split,job,Reporter.NULL); Object key=reader.createKey(); Object value=reader.createValue(); try { while (reader.next(key,value)) { if (key instanceof LongWritable) { assertEquals("Wrong value class.",Text.class,value.getClass()); assertTrue("Invalid value",Integer.parseInt(((Text)value).toString()) % 10 == 0); } else { assertEquals("Wrong key class.",IntWritable.class,key.getClass()); assertEquals("Wrong value class.",LongWritable.class,value.getClass()); assertTrue("Invalid key.",((IntWritable)key).get() % 11 == 0); assertTrue("Invalid value.",((LongWritable)value).get() % 12 == 0); } } } finally { reader.close(); } } }

    Class: org.apache.hadoop.streaming.TestDumpTypedBytes

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Writes 100 lines (multiples of 10) to a file on a 2-datanode mini DFS
     * cluster, runs DumpTypedBytes over that directory with System.out
     * captured into a byte buffer, then decodes the captured bytes as a
     * typed-bytes stream: each record must be a (Long, String) pair with a
     * value divisible by 10, and exactly 100 records must be emitted.
     * System.out is restored and the cluster shut down in the finally block.
     * NOTE(review): the ByteArrayInputStream/TypedBytesInput wrappers are not
     * explicitly closed — harmless for in-memory streams, but worth confirming.
     */
    @Test public void testDumping() throws Exception { Configuration conf=new Configuration(); MiniDFSCluster cluster=new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); FileSystem fs=cluster.getFileSystem(); PrintStream psBackup=System.out; ByteArrayOutputStream out=new ByteArrayOutputStream(); PrintStream psOut=new PrintStream(out); System.setOut(psOut); DumpTypedBytes dumptb=new DumpTypedBytes(conf); try { Path root=new Path("/typedbytestest"); assertTrue(fs.mkdirs(root)); assertTrue(fs.exists(root)); OutputStreamWriter writer=new OutputStreamWriter(fs.create(new Path(root,"test.txt"))); try { for (int i=0; i < 100; i++) { writer.write("" + (10 * i) + "\n"); } } finally { writer.close(); } String[] args=new String[1]; args[0]="/typedbytestest"; int ret=dumptb.run(args); assertEquals("Return value != 0.",0,ret); ByteArrayInputStream in=new ByteArrayInputStream(out.toByteArray()); TypedBytesInput tbinput=new TypedBytesInput(new DataInputStream(in)); int counter=0; Object key=tbinput.read(); while (key != null) { assertEquals(Long.class,key.getClass()); Object value=tbinput.read(); assertEquals(String.class,value.getClass()); assertTrue("Invalid output.",Integer.parseInt(value.toString()) % 10 == 0); counter++; key=tbinput.read(); } assertEquals("Wrong number of outputs.",100,counter); } finally { try { fs.close(); } catch ( Exception e) { } System.setOut(psBackup); cluster.shutdown(); } }

    Class: org.apache.hadoop.streaming.TestLoadTypedBytes

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Writes 100 (Long, String) typed-bytes pairs into an in-memory buffer,
     * feeds that buffer to LoadTypedBytes via a redirected System.in, and
     * verifies the resulting sequence file on a mini DFS cluster: 100 records,
     * Long keys, String values divisible by 10. System.in is restored and the
     * cluster shut down in the finally block.
     */
    @Test
    public void testLoading() throws Exception {
      Configuration conf = new Configuration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      FileSystem fs = cluster.getFileSystem();
      ByteArrayOutputStream out = new ByteArrayOutputStream();
      TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(out));
      for (int i = 0; i < 100; i++) {
        // Long.valueOf avoids the deprecated Long(long) constructor; the
        // serialized typed-bytes form is identical.
        tboutput.write(Long.valueOf(i));
        tboutput.write("" + (10 * i));
      }
      InputStream isBackup = System.in;
      ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
      System.setIn(in);
      LoadTypedBytes loadtb = new LoadTypedBytes(conf);
      try {
        Path root = new Path("/typedbytestest");
        assertTrue(fs.mkdirs(root));
        assertTrue(fs.exists(root));
        String[] args = new String[1];
        args[0] = "/typedbytestest/test.seq";
        int ret = loadtb.run(args);
        assertEquals("Return value != 0.", 0, ret);
        Path file = new Path(root, "test.seq");
        assertTrue(fs.exists(file));
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, file, conf);
        int counter = 0;
        TypedBytesWritable key = new TypedBytesWritable();
        TypedBytesWritable value = new TypedBytesWritable();
        while (reader.next(key, value)) {
          assertEquals(Long.class, key.getValue().getClass());
          assertEquals(String.class, value.getValue().getClass());
          assertTrue("Invalid record.", Integer.parseInt(value.toString()) % 10 == 0);
          counter++;
        }
        assertEquals("Wrong number of records.", 100, counter);
      } finally {
        try {
          fs.close();
        } catch (Exception e) {
        }
        System.setIn(isBackup);
        cluster.shutdown();
      }
    }

    Class: org.apache.hadoop.streaming.TestMultipleCachefiles

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * End-to-end streaming job on mini MR + mini DFS clusters using two
     * -cacheFile entries (CACHE_FILE and CACHE_FILE_2, symlinked via the '#'
     * fragment). The job's input and both cache files are written to DFS, the
     * job is run, and the first two lines of each output part file must match
     * the cache file contents followed by a tab. Both clusters are shut down
     * in the finally block.
     * NOTE(review): only the last part file's lines survive the read loop —
     * presumably a single output file is expected; confirm against the job's
     * reducer count.
     */
    @Test public void testMultipleCachefiles() throws Exception { boolean mayExit=false; MiniMRCluster mr=null; MiniDFSCluster dfs=null; try { Configuration conf=new Configuration(); dfs=new MiniDFSCluster.Builder(conf).build(); FileSystem fileSys=dfs.getFileSystem(); String namenode=fileSys.getUri().toString(); mr=new MiniMRCluster(1,namenode,3); List args=new ArrayList(); for ( Map.Entry entry : mr.createJobConf()) { args.add("-jobconf"); args.add(entry.getKey() + "=" + entry.getValue()); } String argv[]=new String[]{"-input",INPUT_FILE,"-output",OUTPUT_DIR,"-mapper",map,"-reducer",reduce,"-jobconf","stream.tmpdir=" + System.getProperty("test.build.data","/tmp"),"-jobconf",JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-jobconf",JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-cacheFile",fileSys.getUri() + CACHE_FILE + "#"+ mapString,"-cacheFile",fileSys.getUri() + CACHE_FILE_2 + "#"+ mapString2,"-jobconf","mapred.jar=" + TestStreaming.STREAMING_JAR}; for ( String arg : argv) { args.add(arg); } argv=args.toArray(new String[args.size()]); fileSys.delete(new Path(OUTPUT_DIR),true); DataOutputStream file=fileSys.create(new Path(INPUT_FILE)); file.writeBytes(mapString + "\n"); file.writeBytes(mapString2 + "\n"); file.close(); file=fileSys.create(new Path(CACHE_FILE)); file.writeBytes(cacheString + "\n"); file.close(); file=fileSys.create(new Path(CACHE_FILE_2)); file.writeBytes(cacheString2 + "\n"); file.close(); job=new StreamJob(argv,mayExit); job.go(); fileSys=dfs.getFileSystem(); String line=null; String line2=null; Path[] 
fileList=FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR),new Utils.OutputFileUtils.OutputFilesFilter())); for (int i=0; i < fileList.length; i++) { System.out.println(fileList[i].toString()); BufferedReader bread=new BufferedReader(new InputStreamReader(fileSys.open(fileList[i]))); line=bread.readLine(); System.out.println(line); line2=bread.readLine(); System.out.println(line2); } assertEquals(cacheString + "\t",line); assertEquals(cacheString2 + "\t",line2); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }

    Class: org.apache.hadoop.streaming.TestRawBytesStreaming

    APIUtilityVerifier EqualityVerifier 
    /**
     * Runs the raw-bytes streaming job end to end and checks that the
     * part-00000 output matches the expected text. Input and output
     * artifacts are removed in the finally block.
     */
    @Test
    public void testCommandLine() throws Exception {
      try {
        // Best-effort cleanup of any previous run; failures are ignored.
        try {
          FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
        } catch (Exception ignored) {
        }
        createInput();
        OUTPUT_DIR.delete();
        StreamJob streamJob = new StreamJob();
        streamJob.setConf(new Configuration());
        streamJob.run(genArgs());
        File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
        String actual = StreamUtil.slurp(partFile);
        partFile.delete();
        System.out.println(" map=" + map);
        System.out.println("reduce=" + reduce);
        System.err.println("outEx1=" + outputExpect);
        System.err.println(" out1=" + actual);
        assertEquals(outputExpect, actual);
      } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      }
    }

    Class: org.apache.hadoop.streaming.TestStreamAggregate

    APIUtilityVerifier EqualityVerifier 
    /**
     * Runs the aggregate streaming job and compares the part-00000 output
     * against the expected text; cleans up input and output in finally.
     */
    @Test
    public void testCommandLine() throws Exception {
      try {
        // Best-effort cleanup of any previous run; failures are ignored.
        try {
          FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
        } catch (Exception ignored) {
        }
        createInput();
        boolean mayExit = false;
        job = new StreamJob(genArgs(), mayExit);
        job.go();
        File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
        String actual = StreamUtil.slurp(partFile);
        partFile.delete();
        System.err.println("outEx1=" + outputExpect);
        System.err.println(" out1=" + actual);
        assertEquals(outputExpect, actual);
      } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      }
    }

    Class: org.apache.hadoop.streaming.TestStreamDataProtocol

    APIUtilityVerifier EqualityVerifier 
    /**
     * Runs the stream-data-protocol job and compares the part-00000 output
     * against the expected text; cleans up input and output in finally.
     */
    @Test
    public void testCommandLine() throws Exception {
      // Best-effort cleanup of any previous run; failures are ignored.
      try {
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      } catch (Exception ignored) {
      }
      try {
        createInput();
        boolean mayExit = false;
        job = new StreamJob(genArgs(), mayExit);
        job.go();
        File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
        String actual = StreamUtil.slurp(partFile);
        partFile.delete();
        System.err.println("outEx1=" + outputExpect);
        System.err.println(" out1=" + actual);
        System.err.println(" equals=" + outputExpect.compareTo(actual));
        assertEquals(outputExpect, actual);
      } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      }
    }

    Class: org.apache.hadoop.streaming.TestStreamJob

    InternalCallVerifier EqualityVerifier 
    /**
     * Exercises StreamJob argument handling: no arguments is an error
     * (exit 1), while -help and -info are informational successes (exit 0).
     */
    @Test
    public void testOptions() throws Exception {
      StreamJob streamingJob = new StreamJob();
      assertEquals(1, streamingJob.run(new String[0]));
      assertEquals(0, streamingJob.run(new String[] {"-help"}));
      assertEquals(0, streamingJob.run(new String[] {"-info"}));
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * StreamJob.createJob must honour -inputformat for the standard formats
     * and fall back to StreamInputFormat when an -inputreader is supplied
     * alongside an explicit input format.
     */
    @Test
    public void testCreateJob() throws IOException {
      JobConf job;
      ArrayList dummyArgs = new ArrayList();
      dummyArgs.add("-input");
      dummyArgs.add("dummy");
      dummyArgs.add("-output");
      dummyArgs.add("dummy");
      dummyArgs.add("-mapper");
      dummyArgs.add("dummy");
      dummyArgs.add("-reducer");
      dummyArgs.add("dummy");

      ArrayList args = new ArrayList(dummyArgs);
      args.add("-inputformat");
      args.add("org.apache.hadoop.mapred.KeyValueTextInputFormat");
      job = StreamJob.createJob(args.toArray(new String[]{}));
      assertEquals(KeyValueTextInputFormat.class, job.getInputFormat().getClass());

      args = new ArrayList(dummyArgs);
      args.add("-inputformat");
      args.add("org.apache.hadoop.mapred.SequenceFileInputFormat");
      job = StreamJob.createJob(args.toArray(new String[]{}));
      assertEquals(SequenceFileInputFormat.class, job.getInputFormat().getClass());

      // An explicit -inputreader overrides the named input format.
      args = new ArrayList(dummyArgs);
      args.add("-inputformat");
      args.add("org.apache.hadoop.mapred.KeyValueTextInputFormat");
      args.add("-inputreader");
      args.add("StreamXmlRecordReader,begin=,end=");
      job = StreamJob.createJob(args.toArray(new String[]{}));
      assertEquals(StreamInputFormat.class, job.getInputFormat().getClass());
    }

    Class: org.apache.hadoop.streaming.TestStreamReduceNone

    APIUtilityVerifier EqualityVerifier 
    /**
     * Runs the reduce-none streaming job and compares the part-00000 output
     * against the expected text; cleans up input and output in finally.
     */
    @Test
    public void testCommandLine() throws Exception {
      String outFileName = "part-00000";
      File outFile = null;
      try {
        // Best-effort cleanup of any previous run; failures are ignored.
        try {
          FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
        } catch (Exception ignored) {
        }
        createInput();
        boolean mayExit = false;
        job = new StreamJob(genArgs(), mayExit);
        job.go();
        outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
        String actual = StreamUtil.slurp(outFile);
        System.err.println("outEx1=" + outputExpect);
        System.err.println(" out1=" + actual);
        assertEquals(outputExpect, actual);
      } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      }
    }

    Class: org.apache.hadoop.streaming.TestStreaming

    EqualityVerifier 
    /**
     * Runs the streaming job, requires a zero exit status, then verifies
     * the produced output.
     */
    @Test
    public void testCommandLine() throws Exception {
      int exitCode = runStreamJob();
      assertEquals(0, exitCode);
      checkOutput();
    }

    Class: org.apache.hadoop.streaming.TestStreamingFailure

    EqualityVerifier 
    /**
     * The deliberately failing streaming job must surface exit status 5.
     */
    @Override
    @Test
    public void testCommandLine() throws IOException {
      int status = runStreamJob();
      assertEquals("Streaming Job Failure code expected", 5, status);
    }

    Class: org.apache.hadoop.streaming.TestStreamingSeparator

    APIUtilityVerifier EqualityVerifier 
    /**
     * Runs the separator streaming job and compares the part-00000 output
     * against the expected text; cleans up input and output in finally.
     */
    @Test
    public void testCommandLine() throws Exception {
      try {
        // Best-effort cleanup of any previous run; failures are ignored.
        try {
          FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
        } catch (Exception ignored) {
        }
        createInput();
        boolean mayExit = false;
        job = new StreamJob(genArgs(), mayExit);
        job.go();
        File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
        String actual = StreamUtil.slurp(partFile);
        partFile.delete();
        System.err.println("outEx1=" + outputExpect);
        System.err.println(" out1=" + actual);
        assertEquals(outputExpect, actual);
      } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      }
    }

    Class: org.apache.hadoop.streaming.TestSymLink

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * End-to-end streaming job on mini MR + mini DFS clusters using a single
     * -cacheFile entry symlinked as "testlink" via the '#' fragment. The job
     * input and the cache file are written to DFS, the job is run, and the
     * first line of the final output part file must equal the cache file
     * contents followed by a tab. Both clusters are shut down in finally.
     */
    @Test(timeout=120000) public void testSymLink() throws Exception { boolean mayExit=false; MiniMRCluster mr=null; MiniDFSCluster dfs=null; try { Configuration conf=new Configuration(); dfs=new MiniDFSCluster.Builder(conf).build(); FileSystem fileSys=dfs.getFileSystem(); String namenode=fileSys.getUri().toString(); mr=new MiniMRCluster(1,namenode,3); List args=new ArrayList(); for ( Map.Entry entry : mr.createJobConf()) { args.add("-jobconf"); args.add(entry.getKey() + "=" + entry.getValue()); } String argv[]=new String[]{"-input",INPUT_FILE,"-output",OUTPUT_DIR,"-mapper",map,"-reducer",reduce,"-jobconf","stream.tmpdir=" + System.getProperty("test.build.data","/tmp"),"-jobconf",JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-jobconf",JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" + "-Dcontrib.name="+ System.getProperty("contrib.name")+ " "+ "-Dbuild.test="+ System.getProperty("build.test")+ " "+ conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,conf.get(JobConf.MAPRED_TASK_JAVA_OPTS,"")),"-cacheFile",fileSys.getUri() + CACHE_FILE + "#testlink","-jobconf","mapred.jar=" + TestStreaming.STREAMING_JAR}; for ( String arg : argv) { args.add(arg); } argv=args.toArray(new String[args.size()]); fileSys.delete(new Path(OUTPUT_DIR),true); DataOutputStream file=fileSys.create(new Path(INPUT_FILE)); file.writeBytes(mapString); file.close(); file=fileSys.create(new Path(CACHE_FILE)); file.writeBytes(cacheString); file.close(); job=new StreamJob(argv,mayExit); job.go(); fileSys=dfs.getFileSystem(); String line=null; Path[] fileList=FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR),new Utils.OutputFileUtils.OutputFilesFilter())); for (int i=0; i < fileList.length; i++) { System.out.println(fileList[i].toString()); BufferedReader bread=new BufferedReader(new 
InputStreamReader(fileSys.open(fileList[i]))); line=bread.readLine(); System.out.println(line); } assertEquals(cacheString + "\t",line); } finally { if (dfs != null) { dfs.shutdown(); } if (mr != null) { mr.shutdown(); } } }

    Class: org.apache.hadoop.streaming.TestTypedBytesStreaming

    APIUtilityVerifier EqualityVerifier 
    /**
     * Runs the typed-bytes streaming job and compares the part-00000 output
     * against the expected text. Cleanup of INPUT_FILE/OUTPUT_DIR is not done
     * here — presumably handled by a fixture; confirm against the class setup.
     */
    @Test
    public void testCommandLine() throws Exception {
      StreamJob streamJob = new StreamJob();
      streamJob.setConf(new Configuration());
      streamJob.run(genArgs());
      File partFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
      String actual = StreamUtil.slurp(partFile);
      partFile.delete();
      System.out.println(" map=" + map);
      System.out.println("reduce=" + reduce);
      System.err.println("outEx1=" + outputExpect);
      System.err.println(" out1=" + actual);
      assertEquals(outputExpect, actual);
    }

    Class: org.apache.hadoop.streaming.TestUnconsumedInput

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A streaming job whose mapper does not consume all of its input must
     * still exit 0 and produce the full expected number of output records.
     */
    @Test
    public void testUnconsumedInput() throws Exception {
      String outFileName = "part-00000";
      File outFile = null;
      try {
        // Best-effort cleanup of any previous run; failures are ignored.
        try {
          FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
        } catch (Exception ignored) {
        }
        createInput();
        Configuration conf = new Configuration();
        // NOTE(review): the trailing underscore in this key looks odd but is
        // passed through verbatim — confirm it matches the streaming config name.
        conf.set("stream.minRecWrittenToEnableSkip_", "0");
        job = new StreamJob();
        job.setConf(conf);
        int exitCode = job.run(genArgs());
        assertEquals("Job failed", 0, exitCode);
        outFile = new File(OUTPUT_DIR, outFileName).getAbsoluteFile();
        String output = StreamUtil.slurp(outFile);
        assertEquals("Output was truncated", EXPECTED_OUTPUT_SIZE,
            StringUtils.countMatches(output, "\t"));
      } finally {
        INPUT_FILE.delete();
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      }
    }

    Class: org.apache.hadoop.streaming.io.TestKeyOnlyTextOutputReader

    InternalCallVerifier EqualityVerifier 
    /**
     * KeyOnlyTextOutputReader must treat each whole line as the key (no
     * key/value split on comma), and readKeyValue() must return false at
     * end of stream.
     */
    @Test
    public void testKeyOnlyTextOutputReader() throws IOException {
      String text = "key,value\nkey2,value2\nnocomma\n";
      PipeMapRed pipeMapRed = new MyPipeMapRed(text);
      KeyOnlyTextOutputReader outputReader = new KeyOnlyTextOutputReader();
      outputReader.initialize(pipeMapRed);
      outputReader.readKeyValue();
      Assert.assertEquals(new Text("key,value"), outputReader.getCurrentKey());
      outputReader.readKeyValue();
      Assert.assertEquals(new Text("key2,value2"), outputReader.getCurrentKey());
      outputReader.readKeyValue();
      Assert.assertEquals(new Text("nocomma"), outputReader.getCurrentKey());
      // assertFalse states the intent directly, vs. assertEquals(false, ...).
      Assert.assertFalse(outputReader.readKeyValue());
    }

    Class: org.apache.hadoop.streaming.mapreduce.TestStreamXmlRecordReader

    InternalCallVerifier EqualityVerifier 
    /**
     * Configures a job to read its input through StreamXmlRecordReader and
     * verifies that the job completes successfully and produces the expected
     * output (checked by checkOutput()).
     */
    @Test
    public void testStreamXmlRecordReader() throws Exception {
      Job job = new Job();
      Configuration conf = job.getConfiguration();
      job.setJarByClass(TestStreamXmlRecordReader.class);
      job.setMapperClass(Mapper.class);
      conf.set("stream.recordreader.class",
          "org.apache.hadoop.streaming.mapreduce.StreamXmlRecordReader");
      // NOTE(review): begin/end markers are set to empty strings — confirm
      // this is the intended delimiter configuration for the test fixture.
      conf.set("stream.recordreader.begin", "");
      conf.set("stream.recordreader.end", "");
      job.setInputFormatClass(StreamInputFormat.class);
      job.setMapOutputKeyClass(Text.class);
      job.setMapOutputValueClass(Text.class);
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(Text.class);
      FileInputFormat.addInputPath(job, new Path("target/input.xml"));
      OUTPUT_DIR = new Path("target/output");
      fs = FileSystem.get(conf);
      if (fs.exists(OUTPUT_DIR)) {
        fs.delete(OUTPUT_DIR, true);
      }
      FileOutputFormat.setOutputPath(job, OUTPUT_DIR);
      boolean ret = job.waitForCompletion(true);
      // assertTrue states the intent directly, vs. assertEquals(true, ...).
      assertTrue(ret);
      checkOutput();
    }

    Class: org.apache.hadoop.test.TestHFSTestCase

    APIUtilityVerifier EqualityVerifier 
    /**
     * Starts an embedded Jetty server with a trivial servlet mounted at /bar
     * and checks both the HTTP status and the response body.
     */
    @Test
    @TestJetty
    public void testJetty() throws Exception {
      Context context = new Context();
      context.setContextPath("/");
      context.addServlet(MyServlet.class, "/bar");
      Server server = TestJettyHelper.getJettyServer();
      server.addHandler(context);
      server.start();
      URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      BufferedReader reader =
          new BufferedReader(new InputStreamReader(conn.getInputStream()));
      assertEquals("foo", reader.readLine());
      reader.close();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Writes a single byte to HDFS and reads it back, checking both the byte
     * value and the end-of-stream marker; the filesystem is closed in finally.
     */
    @Test
    @TestHdfs
    public void testHadoopFileSystem() throws Exception {
      Configuration conf = TestHdfsHelper.getHdfsConf();
      FileSystem fs = FileSystem.get(conf);
      try {
        OutputStream os = fs.create(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
        os.write(new byte[]{1});
        os.close();
        InputStream is = fs.open(new Path(TestHdfsHelper.getHdfsTestDir(), "foo"));
        // JUnit convention: expected value first, actual second (was reversed).
        assertEquals(1, is.read());
        assertEquals(-1, is.read());
        is.close();
      } finally {
        fs.close();
      }
    }

    EqualityVerifier 
    /**
     * A predicate that is immediately true must make waitFor return almost
     * instantly; both the reported wait and the wall-clock elapsed time are
     * checked against 0 with a 50ms tolerance.
     */
    @Test
    public void waitFor() {
      long start = Time.now();
      long waited = waitFor(1000, new Predicate() {
        @Override
        public boolean evaluate() throws Exception {
          return true;
        }
      });
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(0, waited, 50);
      assertEquals(0, end - start - waited, 50);
    }

    EqualityVerifier 
    /**
     * With the wait-for ratio at 1, sleep(100) must take about 100ms
     * (50ms tolerance).
     */
    @Test
    public void sleepRatio1() {
      setWaitForRatio(1);
      long start = Time.now();
      sleep(100);
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(100, end - start, 50);
    }

    EqualityVerifier 
    /**
     * sleep(100) scaled by the configured wait-for ratio must take about
     * 100 * ratio ms (tolerance scaled likewise).
     * NOTE(review): despite the name, this sets the ratio to 1, making it
     * equivalent to sleepRatio1 — confirm whether setWaitForRatio(2) was meant.
     */
    @Test
    public void sleepRatio2() {
      setWaitForRatio(1);
      long start = Time.now();
      sleep(100);
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(100 * getWaitForRatio(), end - start, 50 * getWaitForRatio());
    }

    EqualityVerifier 
    /**
     * With ratio 2, a never-true predicate must time out: waitFor returns -1
     * and the elapsed time is about 200 * ratio ms (tolerance scaled likewise).
     */
    @Test
    public void waitForTimeOutRatio2() {
      setWaitForRatio(2);
      long start = Time.now();
      long waited = waitFor(200, new Predicate() {
        @Override
        public boolean evaluate() throws Exception {
          return false;
        }
      });
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(-1, waited);
      assertEquals(200 * getWaitForRatio(), end - start, 50 * getWaitForRatio());
    }

    EqualityVerifier 
    /**
     * With ratio 1, a never-true predicate must time out: waitFor returns -1
     * and the elapsed time is about 200ms (50ms tolerance).
     */
    @Test
    public void waitForTimeOutRatio1() {
      setWaitForRatio(1);
      long start = Time.now();
      long waited = waitFor(200, new Predicate() {
        @Override
        public boolean evaluate() throws Exception {
          return false;
        }
      });
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(-1, waited);
      assertEquals(200, end - start, 50);
    }

    Class: org.apache.hadoop.test.TestHTestCase

    EqualityVerifier 
    /**
     * A predicate that is immediately true must make waitFor return almost
     * instantly; both the reported wait and the wall-clock elapsed time are
     * checked against 0 with a 50ms tolerance.
     */
    @Test
    public void waitFor() {
      long start = Time.now();
      long waited = waitFor(1000, new Predicate() {
        @Override
        public boolean evaluate() throws Exception {
          return true;
        }
      });
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(0, waited, 50);
      assertEquals(0, end - start - waited, 50);
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * Starts an embedded Jetty server with a trivial servlet mounted at /bar
     * and checks both the HTTP status and the response body.
     */
    @Test
    @TestJetty
    public void testJetty() throws Exception {
      Context context = new Context();
      context.setContextPath("/");
      context.addServlet(MyServlet.class, "/bar");
      Server server = TestJettyHelper.getJettyServer();
      server.addHandler(context);
      server.start();
      URL url = new URL(TestJettyHelper.getJettyURL(), "/bar");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      BufferedReader reader =
          new BufferedReader(new InputStreamReader(conn.getInputStream()));
      assertEquals("foo", reader.readLine());
      reader.close();
    }

    EqualityVerifier 
    /**
     * With ratio 1, a never-true predicate must time out: waitFor returns -1
     * and the elapsed time is about 200ms (50ms tolerance).
     */
    @Test
    public void waitForTimeOutRatio1() {
      setWaitForRatio(1);
      long start = Time.now();
      long waited = waitFor(200, new Predicate() {
        @Override
        public boolean evaluate() throws Exception {
          return false;
        }
      });
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(-1, waited);
      assertEquals(200, end - start, 50);
    }

    EqualityVerifier 
    /**
     * With ratio 2, a never-true predicate must time out: waitFor returns -1
     * and the elapsed time is about 200 * ratio ms (tolerance scaled likewise).
     */
    @Test
    public void waitForTimeOutRatio2() {
      setWaitForRatio(2);
      long start = Time.now();
      long waited = waitFor(200, new Predicate() {
        @Override
        public boolean evaluate() throws Exception {
          return false;
        }
      });
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(-1, waited);
      assertEquals(200 * getWaitForRatio(), end - start, 50 * getWaitForRatio());
    }

    EqualityVerifier 
    /**
     * With the wait-for ratio at 1, sleep(100) must take about 100ms
     * (50ms tolerance).
     */
    @Test
    public void sleepRatio1() {
      setWaitForRatio(1);
      long start = Time.now();
      sleep(100);
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(100, end - start, 50);
    }

    EqualityVerifier 
    /**
     * sleep(100) scaled by the configured wait-for ratio must take about
     * 100 * ratio ms (tolerance scaled likewise).
     * NOTE(review): despite the name, this sets the ratio to 1, making it
     * equivalent to sleepRatio1 — confirm whether setWaitForRatio(2) was meant.
     */
    @Test
    public void sleepRatio2() {
      setWaitForRatio(1);
      long start = Time.now();
      sleep(100);
      long end = Time.now();
      // JUnit convention: expected value first, actual second (was reversed).
      assertEquals(100 * getWaitForRatio(), end - start, 50 * getWaitForRatio());
    }

    Class: org.apache.hadoop.test.TestMultithreadedTestUtil

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Three worker threads that each bump a shared counter must all run, and
     * the context's waitFor must return promptly once they finish.
     */
    @Test
    public void testNoErrors() throws Exception {
      final AtomicInteger threadsRun = new AtomicInteger();
      TestContext ctx = new TestContext();
      for (int i = 0; i < 3; i++) {
        ctx.addThread(new TestingThread(ctx) {
          @Override
          public void doWork() throws Exception {
            threadsRun.incrementAndGet();
          }
        });
      }
      // Nothing runs until startThreads() is invoked.
      assertEquals(0, threadsRun.get());
      ctx.startThreads();
      long begin = Time.now();
      ctx.waitFor(30000);
      long finish = Time.now();
      assertEquals(3, threadsRun.get());
      assertTrue("Test took " + (finish - begin) + "ms", finish - begin < 5000);
    }

    UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A checked exception thrown by a worker thread must surface from
     * waitFor as a RuntimeException wrapping the original cause.
     */
    @Test
    public void testThreadThrowsCheckedException() throws Exception {
      TestContext ctx = new TestContext();
      ctx.addThread(new TestingThread(ctx) {
        @Override
        public void doWork() throws Exception {
          throw new IOException("my ioe");
        }
      });
      ctx.startThreads();
      long begin = Time.now();
      try {
        ctx.waitFor(30000);
        fail("waitFor did not throw");
      } catch (RuntimeException rte) {
        assertEquals("my ioe", rte.getCause().getMessage());
      }
      long finish = Time.now();
      assertTrue("Test took " + (finish - begin) + "ms", finish - begin < 5000);
    }

    UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * An assertion failure inside a worker thread must surface from waitFor
     * as a RuntimeException whose cause carries the failure message.
     */
    @Test
    public void testThreadFails() throws Exception {
      TestContext ctx = new TestContext();
      ctx.addThread(new TestingThread(ctx) {
        @Override
        public void doWork() throws Exception {
          fail(FAIL_MSG);
        }
      });
      ctx.startThreads();
      long begin = Time.now();
      try {
        ctx.waitFor(30000);
        fail("waitFor did not throw");
      } catch (RuntimeException rte) {
        assertEquals(FAIL_MSG, rte.getCause().getMessage());
      }
      long finish = Time.now();
      assertTrue("Test took " + (finish - begin) + "ms", finish - begin < 5000);
    }

    Class: org.apache.hadoop.test.TestTimedOutTestsListener

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Creates a real two-thread deadlock (Deadlock helper), polls until
     * TimedOutTestsListener detects it (three threads report BLOCKED), then
     * feeds a timed-out-test failure through the listener and checks that its
     * report contains both a thread dump and the deadlock section.
     */
    @Test(timeout=500) public void testThreadDumpAndDeadlocks() throws Exception { new Deadlock(); String s=null; while (true) { s=TimedOutTestsListener.buildDeadlockInfo(); if (s != null) break; Thread.sleep(100); } Assert.assertEquals(3,countStringOccurrences(s,"BLOCKED")); Failure failure=new Failure(null,new Exception(TimedOutTestsListener.TEST_TIMED_OUT_PREFIX)); StringWriter writer=new StringWriter(); new TimedOutTestsListener(new PrintWriter(writer)).testFailure(failure); String out=writer.toString(); Assert.assertTrue(out.contains("THREAD DUMP")); Assert.assertTrue(out.contains("DEADLOCKS DETECTED")); System.out.println(out); }

    Class: org.apache.hadoop.tools.GetGroupsTestBase

    APIUtilityVerifier EqualityVerifier 
    /**
     * Two unknown users should each be echoed back with an empty group list,
     * concatenated in argument order.
     */
    @Test
    public void testMultipleNonExistingUsers() throws Exception {
      String output = runTool(conf,
          new String[] {"does-not-exist1", "does-not-exist2"}, true);
      assertEquals("Show the output for only the user given, with no groups",
          getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1"))
              + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2")),
          output);
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * A single unknown user should be echoed back with an empty group list.
     */
    @Test
    public void testNonExistentUser() throws Exception {
      String output = runTool(conf, new String[] {"does-not-exist"}, true);
      assertEquals("Show the output for only the user given, with no groups",
          getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist")),
          output);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Two known users must both be listed, concatenated in argument order.
     */
    @Test
    public void testMultipleExistingUsers() throws Exception {
      String output = runTool(conf,
          new String[] {testUser1.getUserName(), testUser2.getUserName()}, true);
      assertEquals("Show the output for both users given",
          getExpectedOutput(testUser1) + getExpectedOutput(testUser2), output);
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * With no arguments, the tool must report the current user's groups.
     */
    @Test
    public void testNoUserGiven() throws Exception {
      String output = runTool(conf, new String[0], true);
      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
      assertEquals("No user provided should default to current user",
          getExpectedOutput(currentUser), output);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Known and unknown users interleaved on the command line must be
     * reported in argument order, with empty group lists for the unknowns.
     */
    @Test
    public void testExistingInterleavedWithNonExistentUsers() throws Exception {
      String output = runTool(conf,
          new String[] {"does-not-exist1", testUser1.getUserName(),
              "does-not-exist2", testUser2.getUserName()}, true);
      assertEquals("Show the output for only the user given, with no groups",
          getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1"))
              + getExpectedOutput(testUser1)
              + getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2"))
              + getExpectedOutput(testUser2),
          output);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A single known user must be reported with exactly that user's groups.
     */
    @Test
    public void testExistingUser() throws Exception {
      String output = runTool(conf, new String[] {testUser1.getUserName()}, true);
      assertEquals("Show only the output of the user given",
          getExpectedOutput(testUser1), output);
    }

    Class: org.apache.hadoop.tools.TestCopyListing

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Builds a copy listing over sources containing a duplicate relative path
     * (/tmp/in/2 vs /tmp/in2/2) and checks that: duplicates are rejected with
     * DuplicateFileException, the byte (3+3+4=10) and path (3) totals are as
     * expected, and a listing over deleted sources fails with
     * InvalidInputException. /tmp is cleaned up in the finally block.
     */
    @Test(timeout = 10000)
    public void testBuildListing() {
      FileSystem fs = null;
      try {
        fs = FileSystem.get(getConf());
        List srcPaths = new ArrayList();
        Path p1 = new Path("/tmp/in/1");
        Path p2 = new Path("/tmp/in/2");
        Path p3 = new Path("/tmp/in2/2");
        Path target = new Path("/tmp/out/1");
        srcPaths.add(p1.getParent());
        srcPaths.add(p3.getParent());
        TestDistCpUtils.createFile(fs, "/tmp/in/1");
        TestDistCpUtils.createFile(fs, "/tmp/in/2");
        TestDistCpUtils.createFile(fs, "/tmp/in2/2");
        fs.mkdirs(target);
        OutputStream out = fs.create(p1);
        out.write("ABC".getBytes());
        out.close();
        out = fs.create(p2);
        out.write("DEF".getBytes());
        out.close();
        out = fs.create(p3);
        out.write("GHIJ".getBytes());
        out.close();
        Path listingFile = new Path("/tmp/file");
        DistCpOptions options = new DistCpOptions(srcPaths, target);
        options.setSyncFolder(true);
        CopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
        try {
          listing.buildListing(listingFile, options);
          Assert.fail("Duplicates not detected");
        } catch (DuplicateFileException ignore) {
        }
        // JUnit convention: expected value first, actual second (was reversed).
        Assert.assertEquals(10, listing.getBytesToCopy());
        Assert.assertEquals(3, listing.getNumberOfPaths());
        TestDistCpUtils.delete(fs, "/tmp");
        try {
          listing.buildListing(listingFile, options);
          Assert.fail("Invalid input not detected");
        } catch (InvalidInputException ignore) {
        }
        TestDistCpUtils.delete(fs, "/tmp");
      } catch (IOException e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Test build listing failed");
      } finally {
        TestDistCpUtils.delete(fs, "/tmp");
      }
    }

    NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A failure while closing the listing writer must propagate out of
     * doBuildListing() rather than being swallowed, and the propagated
     * exception must be the writer's own close() exception.
     */
    @Test
    public void testFailOnCloseError() throws IOException {
      File inFile = File.createTempFile("TestCopyListingIn", null);
      inFile.deleteOnExit();
      File outFile = File.createTempFile("TestCopyListingOut", null);
      outFile.deleteOnExit();
      // Parameterized list instead of the raw type.
      List<Path> srcs = new ArrayList<Path>();
      srcs.add(new Path(inFile.toURI()));
      Exception expectedEx = new IOException("boom");
      SequenceFile.Writer writer = mock(SequenceFile.Writer.class);
      doThrow(expectedEx).when(writer).close();
      SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS);
      DistCpOptions options = new DistCpOptions(srcs, new Path(outFile.toURI()));
      Exception actualEx = null;
      try {
        listing.doBuildListing(writer, options);
      } catch (Exception e) {
        actualEx = e;
      }
      Assert.assertNotNull("close writer didn't fail", actualEx);
      Assert.assertEquals(expectedEx, actualEx);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that a shouldCopy() override can filter entries (_SUCCESS is
     * skipped here) while the remaining paths still appear in the listing in
     * order. Also closes the SequenceFile.Reader, which the original test
     * leaked.
     */
    @Test(timeout=10000)
    public void testSkipCopy() throws Exception {
      SimpleCopyListing listing = new SimpleCopyListing(getConf(), CREDENTIALS) {
        @Override
        protected boolean shouldCopy(Path path, DistCpOptions options) {
          // Skip only the _SUCCESS marker file.
          return !path.getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME);
        }
      };
      FileSystem fs = FileSystem.get(getConf());
      // Parameterized list instead of the raw type.
      List<Path> srcPaths = new ArrayList<Path>();
      srcPaths.add(new Path("/tmp/in4/1"));
      srcPaths.add(new Path("/tmp/in4/2"));
      Path target = new Path("/tmp/out4/1");
      TestDistCpUtils.createFile(fs, "/tmp/in4/1/_SUCCESS");
      TestDistCpUtils.createFile(fs, "/tmp/in4/1/file");
      TestDistCpUtils.createFile(fs, "/tmp/in4/2");
      fs.mkdirs(target);
      DistCpOptions options = new DistCpOptions(srcPaths, target);
      Path listingFile = new Path("/tmp/list4");
      listing.buildListing(listingFile, options);
      // JUnit convention: expected value first.
      Assert.assertEquals(3, listing.getNumberOfPaths());
      SequenceFile.Reader reader = new SequenceFile.Reader(getConf(),
          SequenceFile.Reader.file(listingFile));
      try {
        CopyListingFileStatus fileStatus = new CopyListingFileStatus();
        Text relativePath = new Text();
        Assert.assertTrue(reader.next(relativePath, fileStatus));
        Assert.assertEquals("/1", relativePath.toString());
        Assert.assertTrue(reader.next(relativePath, fileStatus));
        Assert.assertEquals("/1/file", relativePath.toString());
        Assert.assertTrue(reader.next(relativePath, fileStatus));
        Assert.assertEquals("/2", relativePath.toString());
        // _SUCCESS must have been filtered out.
        Assert.assertFalse(reader.next(relativePath, fileStatus));
      } finally {
        // Original test never closed the reader (resource leak).
        reader.close();
      }
    }

    Class: org.apache.hadoop.tools.TestExternalCall

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Tests the main method of DistCp. DistCp.main() is expected to call
     * System.exit(), which the test harness converts into an
     * {@link ExitException}. On exit the copy must have completed
     * (status 0) and the staging directory must be left empty.
     */
    @Test
    public void testCleanupTestViaToolRunner() throws IOException, InterruptedException {
      Configuration conf = getConf();
      Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
      stagingDir.getFileSystem(conf).mkdirs(stagingDir);
      // Fixed local-variable typo: "soure" -> "source".
      Path source = createFile("tmp.txt");
      Path target = createFile("target.txt");
      try {
        String[] arg = {target.toString(), source.toString()};
        DistCp.main(arg);
        Assert.fail();
      } catch (ExitException t) {
        Assert.assertTrue(fs.exists(target));
        // JUnit convention: expected value first.
        Assert.assertEquals(0, t.status);
        Assert.assertEquals(0,
            stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
      }
    }

    Class: org.apache.hadoop.tools.TestHadoopArchives

    BranchVerifier InternalCallVerifier EqualityVerifier PublicFieldVerifier 
    /**
     * End-to-end read test over a Hadoop archive. Archives a mix of text,
     * binary and zero-length files (some under directories whose names
     * contain spaces), then reads each file back through HarFileSystem with
     * several read strategies (simple, buffered, readFully, seek, read4,
     * skip) and asserts they all yield identical bytes matching the
     * original content. Finally checks that every archived file was read.
     */
    @Test public void testReadFileContent() throws Exception { fileList.add(createFile(inputPath,fs,"c c")); final Path sub1=new Path(inputPath,"sub 1"); fs.mkdirs(sub1); fileList.add(createFile(inputPath,fs,sub1.getName(),"file x y z")); fileList.add(createFile(inputPath,fs,sub1.getName(),"file")); fileList.add(createFile(inputPath,fs,sub1.getName(),"x")); fileList.add(createFile(inputPath,fs,sub1.getName(),"y")); fileList.add(createFile(inputPath,fs,sub1.getName(),"z")); final Path sub2=new Path(inputPath,"sub 1 with suffix"); fs.mkdirs(sub2); fileList.add(createFile(inputPath,fs,sub2.getName(),"z")); final byte[] binContent=prepareBin(); fileList.add(createFile(inputPath,fs,binContent,sub2.getName(),"bin")); fileList.add(createFile(inputPath,fs,new byte[0],sub2.getName(),"zero-length")); final String fullHarPathStr=makeArchive(); final HarFileSystem harFileSystem=new HarFileSystem(fs); try { final URI harUri=new URI(fullHarPathStr); harFileSystem.initialize(harUri,fs.getConf()); int readFileCount=0; for ( final String pathStr0 : fileList) { final Path path=new Path(fullHarPathStr + Path.SEPARATOR + pathStr0); final String baseName=path.getName(); final FileStatus status=harFileSystem.getFileStatus(path); if (status.isFile()) { final byte[] actualContentSimple=readAllSimple(harFileSystem.open(path),true); final byte[] actualContentBuffer=readAllWithBuffer(harFileSystem.open(path),true); assertArrayEquals(actualContentSimple,actualContentBuffer); final byte[] actualContentFully=readAllWithReadFully(actualContentSimple.length,harFileSystem.open(path),true); assertArrayEquals(actualContentSimple,actualContentFully); final byte[] actualContentSeek=readAllWithSeek(actualContentSimple.length,harFileSystem.open(path),true); assertArrayEquals(actualContentSimple,actualContentSeek); final byte[] actualContentRead4=readAllWithRead4(harFileSystem.open(path),true); assertArrayEquals(actualContentSimple,actualContentRead4); final byte[] 
actualContentSkip=readAllWithSkip(actualContentSimple.length,harFileSystem.open(path),harFileSystem.open(path),true); assertArrayEquals(actualContentSimple,actualContentSkip); if ("bin".equals(baseName)) { assertArrayEquals(binContent,actualContentSimple); } else if ("zero-length".equals(baseName)) { assertEquals(0,actualContentSimple.length); } else { String actual=new String(actualContentSimple,"UTF-8"); assertEquals(baseName,actual); } readFileCount++; } } assertEquals(fileList.size(),readFileCount); } finally { harFileSystem.close(); } }

    APIUtilityVerifier EqualityVerifier 
    /**
     * Archiving a relative input path must preserve the directory listing:
     * an lsr over the original tree and over the archive must match.
     */
    @Test
    public void testRelativePath() throws Exception {
      final Path subDir = new Path(inputPath, "dir1");
      fs.mkdirs(subDir);
      createFile(inputPath, fs, subDir.getName(), "a");
      final FsShell shell = new FsShell(conf);
      final List pathsBeforeArchiving = lsr(shell, "input");
      System.out.println("originalPaths: " + pathsBeforeArchiving);
      final String fullHarPathStr = makeArchive();
      final List pathsInsideArchive = lsr(shell, fullHarPathStr);
      Assert.assertEquals(pathsBeforeArchiving, pathsInsideArchive);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Copies a single archived file out of a HAR to the local file system
     * and checks the copied file has the expected length (1 byte).
     */
    @Test
    public void testCopyToLocal() throws Exception {
      final String fullHarPathStr = makeArchive();
      // Scratch directory on the local FS for the copied-out file.
      final String tmpDir =
          System.getProperty("test.build.data", "build/test/data")
              + "/work-dir/har-fs-tmp";
      final Path tmpPath = new Path(tmpDir);
      final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
      localFs.delete(tmpPath, true);
      localFs.mkdirs(tmpPath);
      assertTrue(localFs.exists(tmpPath));
      final HarFileSystem harFs = new HarFileSystem(fs);
      try {
        harFs.initialize(new URI(fullHarPathStr), fs.getConf());
        final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
        final Path targetPath = new Path(tmpPath, "straus");
        harFs.copyToLocalFile(false, sourcePath, targetPath);
        FileStatus copied = localFs.getFileStatus(targetPath);
        assertEquals(1, copied.getLen());
      } finally {
        harFs.close();
        localFs.delete(tmpPath, true);
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Archiving files and directories whose names contain spaces must
     * preserve the directory listing: lsr over the input tree and over the
     * resulting archive must match.
     */
    @Test
    public void testPathWithSpaces() throws Exception {
      createFile(inputPath, fs, "c c");
      final Path spacedDir = new Path(inputPath, "sub 1");
      fs.mkdirs(spacedDir);
      // Several files under the spaced directory, including spaced names.
      createFile(spacedDir, fs, "file x y z");
      createFile(spacedDir, fs, "file");
      createFile(spacedDir, fs, "x");
      createFile(spacedDir, fs, "y");
      createFile(spacedDir, fs, "z");
      final Path spacedDirWithSuffix = new Path(inputPath, "sub 1 with suffix");
      fs.mkdirs(spacedDirWithSuffix);
      createFile(spacedDirWithSuffix, fs, "z");
      final FsShell shell = new FsShell(conf);
      final String inputPathStr = inputPath.toUri().getPath();
      final List pathsBeforeArchiving = lsr(shell, inputPathStr);
      final String fullHarPathStr = makeArchive();
      final List pathsInsideArchive = lsr(shell, fullHarPathStr);
      Assert.assertEquals(pathsBeforeArchiving, pathsInsideArchive);
    }

    Class: org.apache.hadoop.tools.TestIntegration

    UtilityVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * Runs distcp with overwrite semantics onto an existing target file and
     * verifies the destination ends up with the source's bytes. The input
     * stream is now closed in a finally block so a readFully failure cannot
     * leak it.
     */
    @Test(timeout=100000)
    public void testOverwrite() {
      byte[] contents1 = "contents1".getBytes();
      byte[] contents2 = "contents2".getBytes();
      // Same length so an unmodified target would be byte-distinguishable.
      Assert.assertEquals(contents1.length, contents2.length);
      try {
        addEntries(listFile, "srcdir");
        createWithContents("srcdir/file1", contents1);
        createWithContents("dstdir/file1", contents2);
        Path target = new Path(root + "/dstdir");
        runTest(listFile, target, false, false, false, true);
        checkResult(target, 1, "file1");
        FSDataInputStream is = fs.open(new Path(root + "/dstdir/file1"));
        byte[] dstContents = new byte[contents1.length];
        try {
          is.readFully(dstContents);
        } finally {
          // Close even if readFully throws (original leaked the stream).
          is.close();
        }
        Assert.assertArrayEquals(contents1, dstContents);
      } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
      } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
      }
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A DistCp run with an unresolvable source scheme must fail and must
     * clean up its staging directory. The original test asserted nothing if
     * execute() unexpectedly succeeded; a failure flag now makes the
     * expected failure explicit, and the staging-dir check always runs.
     */
    @Test(timeout=100000)
    public void testCleanup() {
      try {
        // "noscheme" is not a registered FileSystem scheme, so execute()
        // is expected to throw.
        Path sourcePath = new Path("noscheme:///file");
        List<Path> sources = new ArrayList<Path>();
        sources.add(sourcePath);
        DistCpOptions options = new DistCpOptions(sources, target);
        Configuration conf = getConf();
        Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf), conf);
        stagingDir.getFileSystem(conf).mkdirs(stagingDir);
        boolean distCpFailed = false;
        try {
          new DistCp(conf, options).execute();
        } catch (Throwable t) {
          distCpFailed = true;
        }
        Assert.assertTrue("DistCp should have failed for an unresolvable source",
            distCpFailed);
        // Staging directory must be cleaned up after the failed run.
        Assert.assertEquals(0,
            stagingDir.getFileSystem(conf).listStatus(stagingDir).length);
      } catch (Exception e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("testCleanup failed " + e.getMessage());
      }
    }

    Class: org.apache.hadoop.tools.TestJMXGet

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Tests the JMX connection to a DataNode: writes a file of known size,
     * checks the "BytesWritten" metric via JMXGet, then shuts the cluster
     * down and asserts the DataNode MBeans have been unregistered from the
     * platform MBean server.
     * @throws Exception on cluster or JMX failure
     */
    @Test public void testDataNode() throws Exception { int numDatanodes=2; cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build(); cluster.waitActive(); DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test"),fileSize,fileSize,blockSize,(short)2,seed); JMXGet jmx=new JMXGet(); String serviceName="DataNode"; jmx.setService(serviceName); jmx.init(); assertEquals(fileSize,Integer.parseInt(jmx.getValue("BytesWritten"))); cluster.shutdown(); MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer(); ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*"); Set names=mbsc.queryNames(query,null); assertTrue("No beans should be registered for " + serviceName,names.isEmpty()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Tests the JMX connection to the NameNode: checks printAllValues,
     * NumLiveDataNodes, the CorruptBlocks gauge and NumOpenConnections,
     * then shuts the cluster down and asserts the NameNode MBeans are gone.
     * NOTE(review): NumOpenConnections is compared against numDatanodes —
     * presumably each DataNode holds one open IPC connection here; confirm
     * this assumption if the assertion becomes flaky.
     * @throws Exception on cluster or JMX failure
     */
    @Test public void testNameNode() throws Exception { int numDatanodes=2; cluster=new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes).build(); cluster.waitActive(); DFSTestUtil.createFile(cluster.getFileSystem(),new Path("/test1"),fileSize,fileSize,blockSize,(short)2,seed); JMXGet jmx=new JMXGet(); String serviceName="NameNode"; jmx.setService(serviceName); jmx.init(); assertTrue("error printAllValues",checkPrintAllValues(jmx)); assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumLiveDataNodes"))); assertGauge("CorruptBlocks",Long.parseLong(jmx.getValue("CorruptBlocks")),getMetrics("FSNamesystem")); assertEquals(numDatanodes,Integer.parseInt(jmx.getValue("NumOpenConnections"))); cluster.shutdown(); MBeanServerConnection mbsc=ManagementFactory.getPlatformMBeanServer(); ObjectName query=new ObjectName("Hadoop:service=" + serviceName + ",*"); Set names=mbsc.queryNames(query,null); assertTrue("No beans should be registered for " + serviceName,names.isEmpty()); }

    Class: org.apache.hadoop.tools.TestOptionsParser

    NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The -log option must default to unset (null) and must be honored
     * when supplied.
     */
    @Test
    public void testLogPath() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertNull(options.getLogPath());
      options = OptionsParser.parse(new String[]{
          "-log", "hdfs://localhost:8020/logs",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals(new Path("hdfs://localhost:8020/logs"),
          options.getLogPath());
    }

    UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The -tmp (atomic work path) option must default to null, remain null
     * with bare -atomic, be honored with -atomic -tmp, and be rejected
     * without -atomic.
     */
    @Test
    public void testParseWorkPath() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertNull(options.getAtomicWorkPath());
      options = OptionsParser.parse(new String[]{
          "-atomic",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertNull(options.getAtomicWorkPath());
      options = OptionsParser.parse(new String[]{
          "-atomic", "-tmp", "hdfs://localhost:8020/work",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals(new Path("hdfs://localhost:8020/work"),
          options.getAtomicWorkPath());
      try {
        OptionsParser.parse(new String[]{
            "-tmp", "hdfs://localhost:8020/work",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        Assert.fail("work path was allowed without -atomic switch");
      } catch (IllegalArgumentException ignore) {
        // expected: -tmp requires -atomic
      }
    }

    EqualityVerifier 
    /**
     * The -strategy option must be honored when given and default to
     * DistCpConstants.UNIFORMSIZE otherwise.
     */
    @Test
    public void testCopyStrategy() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "-strategy", "dynamic", "-f",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals("dynamic", options.getCopyStrategy());
      options = OptionsParser.parse(new String[]{
          "-f",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertEquals(DistCpConstants.UNIFORMSIZE, options.getCopyStrategy());
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * The -m (max maps) option must default to DistCpConstants.DEFAULT_MAPS,
     * accept positive values, clamp 0 to 1, and reject non-numeric values.
     * An unrecognized option must also fail parsing. (The second failure
     * message previously said "Non numberic map parsed" — a copy-paste
     * error, since that case tests an unknown flag, not the map count.)
     */
    @Test
    public void testParseMaps() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals(DistCpConstants.DEFAULT_MAPS, options.getMaxMaps());
      options = OptionsParser.parse(new String[]{
          "-m", "1",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertEquals(1, options.getMaxMaps());
      options = OptionsParser.parse(new String[]{
          "-m", "0",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // A requested value of 0 is clamped up to 1 map.
      Assert.assertEquals(1, options.getMaxMaps());
      try {
        OptionsParser.parse(new String[]{
            "-m", "hello",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        Assert.fail("Non-numeric map count parsed");
      } catch (IllegalArgumentException ignore) {
        // expected
      }
      try {
        OptionsParser.parse(new String[]{
            "-mapredXslConf",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        Assert.fail("Unrecognized option parsed");
      } catch (IllegalArgumentException ignore) {
        // expected
      }
    }

    EqualityVerifier 
    /** The final positional argument must be parsed as the target path. */
    @Test
    public void testTargetPath() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "-f",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals(new Path("hdfs://localhost:8020/target/"),
          options.getTargetPath());
    }

    EqualityVerifier 
    /**
     * The -bandwidth option must default to
     * DistCpConstants.DEFAULT_BANDWIDTH_MB and be honored when supplied.
     */
    @Test
    public void testParsebandwidth() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals(DistCpConstants.DEFAULT_BANDWIDTH_MB,
          options.getMapBandwidth());
      options = OptionsParser.parse(new String[]{
          "-bandwidth", "11",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertEquals(11, options.getMapBandwidth());
    }

    IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * DistCpOptions.toString() must render every switch at its default
     * value, and the option-switch enum's toString() must differ from its
     * constant name.
     */
    @Test
    public void testToString() {
      DistCpOptions option = new DistCpOptions(new Path("abc"), new Path("xyz"));
      String expected = "DistCpOptions{atomicCommit=false, syncFolder=false, "
          + "deleteMissing=false, ignoreFailures=false, maxMaps=20, "
          + "sslConfigurationFile='null', copyStrategy='uniformsize', "
          + "sourceFileListing=abc, sourcePaths=null, targetPath=xyz, "
          + "targetPathExists=true, preserveRawXattrs=false}";
      Assert.assertEquals(expected, option.toString());
      Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(),
          DistCpOptionSwitch.ATOMIC_COMMIT.name());
    }

    NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The -mapredSslConf option must default to null and be honored when
     * supplied.
     */
    @Test
    public void testParseSSLConf() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      Assert.assertNull(options.getSslConfigurationFile());
      options = OptionsParser.parse(new String[]{
          "-mapredSslConf", "/tmp/ssl-client.xml",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals("/tmp/ssl-client.xml",
          options.getSslConfigurationFile());
    }

    EqualityVerifier 
    /** The -f argument must be parsed as the source file listing path. */
    @Test
    public void testSourceListing() {
      DistCpOptions options = OptionsParser.parse(new String[]{
          "-f",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      // JUnit convention: expected value first.
      Assert.assertEquals(new Path("hdfs://localhost:8020/source/first"),
          options.getSourceFileListing());
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exhaustively checks the -p (preserve attributes) flag family:
     * no -p preserves nothing; bare -p (with or without -f) preserves the
     * "classic" six attributes but not ACL/XATTR; each letter combination
     * (-pbr, -pbrgup, -pbrgupcax, -pc) enables exactly its attributes; the
     * preserveAttributes() iterator for bare -p yields 6 entries; an
     * invalid letter (-pabcd) fails to parse; and preserve() is idempotent
     * for an attribute already set.
     */
    @Test public void testPreserve(){ DistCpOptions options=OptionsParser.parse(new String[]{"-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); options=OptionsParser.parse(new String[]{"-p","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-p","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new 
String[]{"-pbr","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-pbrgup","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-pbrgupcax","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertTrue(options.shouldPreserve(FileAttribute.USER)); Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL)); Assert.assertTrue(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new 
String[]{"-pc","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); Assert.assertFalse(options.shouldPreserve(FileAttribute.USER)); Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP)); Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL)); Assert.assertFalse(options.shouldPreserve(FileAttribute.XATTR)); options=OptionsParser.parse(new String[]{"-p","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); int i=0; Iterator attribIterator=options.preserveAttributes(); while (attribIterator.hasNext()) { attribIterator.next(); i++; } Assert.assertEquals(i,6); try { OptionsParser.parse(new String[]{"-pabcd","-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target"}); Assert.fail("Invalid preserve attribute"); } catch ( IllegalArgumentException ignore) { } catch ( NoSuchElementException ignore) { } options=OptionsParser.parse(new String[]{"-f","hdfs://localhost:8020/source/first","hdfs://localhost:8020/target/"}); Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION)); options.preserve(FileAttribute.PERMISSION); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); options.preserve(FileAttribute.PERMISSION); Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION)); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * appendToConf() must project every parsed switch into the
     * Configuration: boolean flags, the preserve-status string and the
     * bandwidth, leaving unset options at their defaults.
     */
    @Test
    public void testOptionsAppendToConf() {
      Configuration conf = new Configuration();
      Assert.assertFalse(conf.getBoolean(
          DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
      Assert.assertFalse(conf.getBoolean(
          DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
      DistCpOptions options = OptionsParser.parse(new String[]{
          "-atomic", "-i",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      options.appendToConf(conf);
      Assert.assertTrue(conf.getBoolean(
          DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false));
      Assert.assertTrue(conf.getBoolean(
          DistCpOptionSwitch.ATOMIC_COMMIT.getConfigLabel(), false));
      // JUnit convention: expected value first.
      Assert.assertEquals(DistCpConstants.DEFAULT_BANDWIDTH_MB,
          conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1));
      conf = new Configuration();
      Assert.assertFalse(conf.getBoolean(
          DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
      Assert.assertFalse(conf.getBoolean(
          DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
      // assertNull is clearer than assertEquals(x, null).
      Assert.assertNull(conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
      options = OptionsParser.parse(new String[]{
          "-update", "-delete", "-pu", "-bandwidth", "11",
          "hdfs://localhost:8020/source/first",
          "hdfs://localhost:8020/target/"});
      options.appendToConf(conf);
      Assert.assertTrue(conf.getBoolean(
          DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false));
      Assert.assertTrue(conf.getBoolean(
          DistCpOptionSwitch.DELETE_MISSING.getConfigLabel(), false));
      Assert.assertEquals("U",
          conf.get(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel()));
      Assert.assertEquals(11,
          conf.getInt(DistCpOptionSwitch.BANDWIDTH.getConfigLabel(), -1));
    }

    Class: org.apache.hadoop.tools.mapred.TestCopyCommitter

    APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * With -delete enabled, CopyCommitter.commitJob() must remove target
     * files that have no counterpart in the source (here 2, A) so source
     * and target converge to the same 4 common names; the commit must also
     * be idempotent — a second commitJob() leaves the target unchanged.
     */
    @Test public void testDeleteMissingFlatInterleavedFiles(){ TaskAttemptContext taskAttemptContext=getTaskAttemptContext(config); JobContext jobContext=new JobContextImpl(taskAttemptContext.getConfiguration(),taskAttemptContext.getTaskAttemptID().getJobID()); Configuration conf=jobContext.getConfiguration(); String sourceBase; String targetBase; FileSystem fs=null; try { OutputCommitter committer=new CopyCommitter(null,taskAttemptContext); fs=FileSystem.get(conf); sourceBase="/tmp1/" + String.valueOf(rand.nextLong()); targetBase="/tmp1/" + String.valueOf(rand.nextLong()); TestDistCpUtils.createFile(fs,sourceBase + "/1"); TestDistCpUtils.createFile(fs,sourceBase + "/3"); TestDistCpUtils.createFile(fs,sourceBase + "/4"); TestDistCpUtils.createFile(fs,sourceBase + "/5"); TestDistCpUtils.createFile(fs,sourceBase + "/7"); TestDistCpUtils.createFile(fs,sourceBase + "/8"); TestDistCpUtils.createFile(fs,sourceBase + "/9"); TestDistCpUtils.createFile(fs,targetBase + "/2"); TestDistCpUtils.createFile(fs,targetBase + "/4"); TestDistCpUtils.createFile(fs,targetBase + "/5"); TestDistCpUtils.createFile(fs,targetBase + "/7"); TestDistCpUtils.createFile(fs,targetBase + "/9"); TestDistCpUtils.createFile(fs,targetBase + "/A"); DistCpOptions options=new DistCpOptions(Arrays.asList(new Path(sourceBase)),new Path("/out")); options.setSyncFolder(true); options.setDeleteMissing(true); options.appendToConf(conf); CopyListing listing=new GlobbedCopyListing(conf,CREDENTIALS); Path listingFile=new Path("/tmp1/" + String.valueOf(rand.nextLong())); listing.buildListing(listingFile,options); conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,targetBase); conf.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,targetBase); committer.commitJob(jobContext); if (!TestDistCpUtils.checkIfFoldersAreInSync(fs,targetBase,sourceBase)) { Assert.fail("Source and target folders are not in sync"); } Assert.assertEquals(fs.listStatus(new Path(targetBase)).length,4); committer.commitJob(jobContext); if 
(!TestDistCpUtils.checkIfFoldersAreInSync(fs,targetBase,sourceBase)) { Assert.fail("Source and target folders are not in sync"); } Assert.assertEquals(fs.listStatus(new Path(targetBase)).length,4); } catch ( IOException e) { LOG.error("Exception encountered while testing for delete missing",e); Assert.fail("Delete missing failure"); } finally { TestDistCpUtils.delete(fs,"/tmp1"); conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING,"false"); } }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * commitJob() with nothing to commit must still succeed and set the
     * task status, and must be idempotent when invoked twice.
     */
    @Test
    public void testNoCommitAction() {
      TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
      JobContext jobContext = new JobContextImpl(
          taskAttemptContext.getConfiguration(),
          taskAttemptContext.getTaskAttemptID().getJobID());
      try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        committer.commitJob(jobContext);
        // JUnit convention: expected value first.
        Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
        // Second commit must behave identically (idempotence).
        committer.commitJob(jobContext);
        Assert.assertEquals("Commit Successful", taskAttemptContext.getStatus());
      } catch (IOException e) {
        LOG.error("Exception encountered ", e);
        Assert.fail("Commit failed");
      }
    }

    Class: org.apache.hadoop.tools.mapred.TestCopyMapper

    InternalCallVerifier EqualityVerifier 
    /**
     * After an initial copy, appends data to every source file and re-runs
     * CopyMapper with the APPEND switch on. Verifies the targets match and
     * that the byte counter reflects the doubled file sizes while the copy
     * counter still counts every path once.
     */
    @Test
    public void testCopyWithAppend() throws Exception {
      final FileSystem fs = cluster.getFileSystem();
      // First pass: plain copy, then grow each source file.
      testCopy(false);
      appendSourceData();
      CopyMapper copyMapper = new CopyMapper();
      StubContext stubContext = new StubContext(getConfiguration(), null, 0);
      Mapper.Context context = stubContext.getContext();
      context.getConfiguration().setBoolean(
          DistCpOptionSwitch.APPEND.getConfigLabel(), true);
      copyMapper.setup(context);
      for (Path path : pathList) {
        copyMapper.map(
            new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
            new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(path)),
            context);
      }
      verifyCopy(fs, false);
      // Each file was appended to its original size, so 2x bytes in total.
      Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE * 2,
          stubContext.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
      Assert.assertEquals(pathList.size(),
          stubContext.getReporter().getCounter(CopyMapper.Counter.COPY).getValue());
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs CopyMapper as an unprivileged "guest" user against a source and
     * target file that already have identical read-only contents/permissions,
     * and asserts the mapper reports the file as SKIPped rather than
     * attempting (and failing) a copy it has no permission to perform.
     */
    @Test(timeout=40000) public void testSkipCopyNoPerms(){ try { deleteState(); createSourceData(); UserGroupInformation tmpUser=UserGroupInformation.createRemoteUser("guest"); final CopyMapper copyMapper=new CopyMapper(); final StubContext stubContext=tmpUser.doAs(new PrivilegedAction(){ @Override public StubContext run(){ try { return new StubContext(getConfiguration(),null,0); } catch ( Exception e) { LOG.error("Exception encountered ",e); throw new RuntimeException(e); } } } ); final Mapper.Context context=stubContext.getContext(); EnumSet preserveStatus=EnumSet.allOf(DistCpOptions.FileAttribute.class); preserveStatus.remove(DistCpOptions.FileAttribute.ACL); preserveStatus.remove(DistCpOptions.FileAttribute.XATTR); context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,DistCpUtils.packAttributes(preserveStatus)); touchFile(SOURCE_PATH + "/src/file"); touchFile(TARGET_PATH + "/src/file"); cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ)); cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),new FsPermission(FsAction.READ,FsAction.READ,FsAction.READ)); final FileSystem tmpFS=tmpUser.doAs(new PrivilegedAction(){ @Override public FileSystem run(){ try { return FileSystem.get(configuration); } catch ( IOException e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); throw new RuntimeException("Test ought to fail here"); } } } ); tmpUser.doAs(new PrivilegedAction(){ @Override public Integer run(){ try { copyMapper.setup(context); copyMapper.map(new Text("/src/file"),new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),context); Assert.assertEquals(stubContext.getWriter().values().size(),1); Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP")); Assert.assertTrue(stubContext.getWriter().values().get(0).toString().contains(SOURCE_PATH + 
"/src/file")); } catch ( Exception e) { throw new RuntimeException(e); } return null; } } ); } catch ( Exception e) { LOG.error("Exception encountered ",e); Assert.fail("Test failed: " + e.getMessage()); } }

    Class: org.apache.hadoop.tools.mapred.TestCopyOutputFormat

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * getWorkingDirectory() must return null when the config value is
     * absent or empty, and must round-trip the path set via
     * setWorkingDirectory(), which also writes the raw config label.
     */
    @Test
    public void testSetWorkingDirectory() {
      try {
        Job job = Job.getInstance(new Configuration());
        Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));
        job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, "");
        Assert.assertEquals(null, CopyOutputFormat.getWorkingDirectory(job));
        Path workDir = new Path("/tmp/test");
        CopyOutputFormat.setWorkingDirectory(job, workDir);
        Assert.assertEquals(workDir, CopyOutputFormat.getWorkingDirectory(job));
        Assert.assertEquals(workDir.toString(),
            job.getConfiguration().get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
      } catch (IOException e) {
        LOG.error("Exception encountered while running test", e);
        Assert.fail("Failed while testing for set Working Directory");
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * getCommitDirectory() must return null when the config value is
     * absent or empty, and must round-trip the path set via
     * setCommitDirectory(), which also writes the raw config label.
     */
    @Test
    public void testSetCommitDirectory() {
      try {
        Job job = Job.getInstance(new Configuration());
        Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));
        job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH, "");
        Assert.assertEquals(null, CopyOutputFormat.getCommitDirectory(job));
        Path commitDir = new Path("/tmp/test");
        CopyOutputFormat.setCommitDirectory(job, commitDir);
        Assert.assertEquals(commitDir, CopyOutputFormat.getCommitDirectory(job));
        Assert.assertEquals(commitDir.toString(),
            job.getConfiguration().get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
      } catch (IOException e) {
        LOG.error("Exception encountered while running test", e);
        Assert.fail("Failed while testing for set Commit Directory");
      }
    }

    Class: org.apache.hadoop.tools.mapred.TestRetriableFileCopyCommand

    NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A failure while closing the output stream must surface the original
     * IOException to the caller of copyBytes, not be swallowed.
     */
    @SuppressWarnings("rawtypes")
    @Test public void testFailOnCloseError() throws Exception {
      final Mapper.Context context=mock(Mapper.Context.class);
      doReturn(new Configuration()).when(context).getConfiguration();
      // Stream whose close() always throws the exception we expect back.
      final Exception boom=new IOException("boom");
      final OutputStream sink=mock(OutputStream.class);
      doThrow(boom).when(sink).close();
      final File tmp=File.createTempFile(this.getClass().getSimpleName(),null);
      tmp.deleteOnExit();
      final FileStatus stat=new FileStatus(1L,false,1,1024,0,new Path(tmp.toURI()));
      Exception caught=null;
      try {
        new RetriableFileCopyCommand("testFailOnCloseError",FileAction.OVERWRITE)
            .copyBytes(stat,0,sink,512,context);
      } catch ( Exception e) {
        caught=e;
      }
      assertNotNull("close didn't fail",caught);
      // Must be the very exception thrown by close(), not a wrapper.
      assertEquals(boom,caught);
    }

    Class: org.apache.hadoop.tools.mapred.lib.TestDynamicInputFormat

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * DynamicInputFormat: every file in the copy listing must be delivered
     * through the dynamic splits exactly once, with reader progress staying
     * within [0, 1] and never decreasing.
     */
    @Test public void testGetSplits() throws Exception {
      DistCpOptions options=getOptions();
      Configuration configuration=new Configuration();
      configuration.set("mapred.map.tasks",String.valueOf(options.getMaxMaps()));
      // Build the sequence-file listing the input format will chunk up.
      CopyListing.getCopyListing(configuration,CREDENTIALS,options).buildListing(new Path(cluster.getFileSystem().getUri().toString() + "/tmp/testDynInputFormat/fileList.seq"),options);
      JobContext jobContext=new JobContextImpl(configuration,new JobID());
      DynamicInputFormat inputFormat=new DynamicInputFormat();
      List splits=inputFormat.getSplits(jobContext);
      int nFiles=0;   // total records observed across all readers
      int taskId=0;   // distinct task id per simulated attempt
      for ( InputSplit split : splits) {
        RecordReader recordReader=inputFormat.createRecordReader(split,null);
        StubContext stubContext=new StubContext(jobContext.getConfiguration(),recordReader,taskId);
        final TaskAttemptContext taskAttemptContext=stubContext.getContext();
        // NOTE(review): initializes with splits.get(0) rather than the current
        // 'split' of this loop iteration. Dynamic readers acquire chunks on
        // demand, so this may be deliberate — but confirm it is not a
        // copy/paste slip before relying on it.
        recordReader.initialize(splits.get(0),taskAttemptContext);
        float previousProgressValue=0f;
        while (recordReader.nextKeyValue()) {
          CopyListingFileStatus fileStatus=recordReader.getCurrentValue();
          String source=fileStatus.getPath().toString();
          System.out.println(source);
          Assert.assertTrue(expectedFilePaths.contains(source));
          // Progress is monotonically non-decreasing and bounded by [0, 1].
          final float progress=recordReader.getProgress();
          Assert.assertTrue(progress >= previousProgressValue);
          Assert.assertTrue(progress >= 0.0f);
          Assert.assertTrue(progress <= 1.0f);
          previousProgressValue=progress;
          ++nFiles;
        }
        // A fully-consumed reader must report completion.
        Assert.assertTrue(recordReader.getProgress() == 1.0f);
        ++taskId;
      }
      Assert.assertEquals(expectedFilePaths.size(),nFiles);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * getSplitRatio heuristics: default behavior, invalid (negative)
     * configuration falling back to the defaults, and explicit valid
     * configuration taking effect.
     */
    @Test public void testGetSplitRatio() throws Exception {
      // No configuration: built-in heuristics.
      Assert.assertEquals(1,DynamicInputFormat.getSplitRatio(1,1000000000));
      Assert.assertEquals(2,DynamicInputFormat.getSplitRatio(11000000,10));
      Assert.assertEquals(4,DynamicInputFormat.getSplitRatio(30,700));
      Assert.assertEquals(2,DynamicInputFormat.getSplitRatio(30,200));
      // Negative settings are invalid and must behave like the defaults.
      final Configuration ratioConf=new Configuration();
      ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE,-1);
      ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL,-1);
      ratioConf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK,-1);
      ratioConf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO,-1);
      Assert.assertEquals(1,DynamicInputFormat.getSplitRatio(1,1000000000,ratioConf));
      Assert.assertEquals(2,DynamicInputFormat.getSplitRatio(11000000,10,ratioConf));
      Assert.assertEquals(4,DynamicInputFormat.getSplitRatio(30,700,ratioConf));
      Assert.assertEquals(2,DynamicInputFormat.getSplitRatio(30,200,ratioConf));
      // Valid explicit settings: the configured split ratio wins.
      ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE,100);
      ratioConf.setInt(DistCpConstants.CONF_LABEL_MAX_CHUNKS_IDEAL,30);
      ratioConf.setInt(DistCpConstants.CONF_LABEL_MIN_RECORDS_PER_CHUNK,10);
      ratioConf.setInt(DistCpConstants.CONF_LABEL_SPLIT_RATIO,53);
      Assert.assertEquals(53,DynamicInputFormat.getSplitRatio(3,200,ratioConf));
    }

    Class: org.apache.hadoop.tools.util.TestDistCpUtils

    EqualityVerifier 
    @Test public void testPackAttributes(){ EnumSet attributes=EnumSet.noneOf(FileAttribute.class); Assert.assertEquals(DistCpUtils.packAttributes(attributes),""); attributes.add(FileAttribute.REPLICATION); Assert.assertEquals(DistCpUtils.packAttributes(attributes),"R"); Assert.assertEquals(attributes,DistCpUtils.unpackAttributes("R")); attributes.add(FileAttribute.BLOCKSIZE); Assert.assertEquals(DistCpUtils.packAttributes(attributes),"RB"); Assert.assertEquals(attributes,DistCpUtils.unpackAttributes("RB")); attributes.add(FileAttribute.USER); Assert.assertEquals(DistCpUtils.packAttributes(attributes),"RBU"); Assert.assertEquals(attributes,DistCpUtils.unpackAttributes("RBU")); attributes.add(FileAttribute.GROUP); Assert.assertEquals(DistCpUtils.packAttributes(attributes),"RBUG"); Assert.assertEquals(attributes,DistCpUtils.unpackAttributes("RBUG")); attributes.add(FileAttribute.PERMISSION); Assert.assertEquals(DistCpUtils.packAttributes(attributes),"RBUGP"); Assert.assertEquals(attributes,DistCpUtils.unpackAttributes("RBUGP")); }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * DistCpUtils.preserve must apply only the requested attributes:
     * with an empty set nothing changes; adding PERMISSION copies the mode;
     * adding USER/GROUP then copies ownership as well.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), which matters for failure messages.
     */
    @Test public void testPreserve(){
      try {
        FileSystem fs=FileSystem.get(config);
        EnumSet attributes=EnumSet.noneOf(FileAttribute.class);
        Path path=new Path("/tmp/abc");
        Path src=new Path("/tmp/src");
        fs.mkdirs(path);
        fs.mkdirs(src);
        CopyListingFileStatus srcStatus=new CopyListingFileStatus(fs.getFileStatus(src));
        // Strip all permissions and hand ownership to "nobody" so any
        // preservation effect is observable.
        FsPermission noPerm=new FsPermission((short)0);
        fs.setPermission(path,noPerm);
        fs.setOwner(path,"nobody","nobody");
        // Empty attribute set: nothing may change.
        DistCpUtils.preserve(fs,path,srcStatus,attributes,false);
        FileStatus target=fs.getFileStatus(path);
        Assert.assertEquals(noPerm,target.getPermission());
        Assert.assertEquals("nobody",target.getOwner());
        Assert.assertEquals("nobody",target.getGroup());
        // PERMISSION only: mode is copied, ownership untouched.
        attributes.add(FileAttribute.PERMISSION);
        DistCpUtils.preserve(fs,path,srcStatus,attributes,false);
        target=fs.getFileStatus(path);
        Assert.assertEquals(srcStatus.getPermission(),target.getPermission());
        Assert.assertEquals("nobody",target.getOwner());
        Assert.assertEquals("nobody",target.getGroup());
        // GROUP + USER added: ownership is copied as well.
        attributes.add(FileAttribute.GROUP);
        attributes.add(FileAttribute.USER);
        DistCpUtils.preserve(fs,path,srcStatus,attributes,false);
        target=fs.getFileStatus(path);
        Assert.assertEquals(srcStatus.getPermission(),target.getPermission());
        Assert.assertEquals(srcStatus.getOwner(),target.getOwner());
        Assert.assertEquals(srcStatus.getGroup(),target.getGroup());
        fs.delete(path,true);
        fs.delete(src,true);
      } catch ( IOException e) {
        LOG.error("Exception encountered ",e);
        Assert.fail("Preserve test failure");
      }
    }

    EqualityVerifier 
    /**
     * getRelativePath must strip the root prefix from a descendant path,
     * including the degenerate case where the root is "/".
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), which matters for failure messages.
     */
    @Test public void testGetRelativePathRoot(){
      Path root=new Path("/tmp/abc");
      Path child=new Path("/tmp/abc/xyz/file");
      Assert.assertEquals("/xyz/file",DistCpUtils.getRelativePath(root,child));
      // Root of the filesystem: the child path is returned unchanged.
      root=new Path("/");
      child=new Path("/a");
      Assert.assertEquals("/a",DistCpUtils.getRelativePath(root,child));
    }

    Class: org.apache.hadoop.util.TestApplicationClassLoader

    APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A resource present only in the app jar must be invisible to the parent
     * classloader but readable through the ApplicationClassLoader.
     *
     * Fix: the resource InputStream was never closed (leaked an open handle
     * on the test jar); it is now closed in a finally block.
     */
    @Test public void testGetResource() throws IOException {
      URL testJar=makeTestJar().toURI().toURL();
      ClassLoader currentClassLoader=getClass().getClassLoader();
      ClassLoader appClassloader=new ApplicationClassLoader(
          new URL[]{testJar},currentClassLoader,null);
      // The parent loader must NOT see the jar-only resource.
      assertNull("Resource should be null for current classloader",
          currentClassLoader.getResourceAsStream("resource.txt"));
      InputStream in=appClassloader.getResourceAsStream("resource.txt");
      assertNotNull("Resource should not be null for app classloader",in);
      try {
        assertEquals("hello",IOUtils.toString(in));
      } finally {
        in.close();
      }
    }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * constructUrlsFromClasspath: plain files and directories map to their own
     * URLs, a "dir/*" wildcard expands to the jars inside (non-jars skipped),
     * and nonexistent entries are dropped entirely.
     */
    @Test public void testConstructUrlsFromClasspath() throws Exception {
      final File file=new File(testDir,"file");
      assertTrue("Create file",file.createNewFile());
      final File dir=new File(testDir,"dir");
      assertTrue("Make dir",dir.mkdir());
      final File jarsDir=new File(testDir,"jarsdir");
      assertTrue("Make jarsDir",jarsDir.mkdir());
      final File nonJarFile=new File(jarsDir,"nonjar");
      assertTrue("Create non-jar file",nonJarFile.createNewFile());
      final File jarFile=new File(jarsDir,"a.jar");
      assertTrue("Create jar file",jarFile.createNewFile());
      final File nofile=new File(testDir,"nofile");  // deliberately never created
      final StringBuilder cp=new StringBuilder()
          .append(file.getAbsolutePath()).append(File.pathSeparator)
          .append(dir.getAbsolutePath()).append(File.pathSeparator)
          .append(jarsDir.getAbsolutePath() + "/*").append(File.pathSeparator)
          .append(nofile.getAbsolutePath()).append(File.pathSeparator)
          .append(nofile.getAbsolutePath() + "/*").append(File.pathSeparator);
      final URL[] urls=constructUrlsFromClasspath(cp.toString());
      // Only the file, the dir, and the one jar under the wildcard survive.
      assertEquals(3,urls.length);
      assertEquals(file.toURI().toURL(),urls[0]);
      assertEquals(dir.toURI().toURL(),urls[1]);
      assertEquals(jarFile.toURI().toURL(),urls[2]);
    }

    Class: org.apache.hadoop.util.TestAsyncDiskService

    BranchVerifier UtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Runs 100 ExampleTasks spread across two volumes, checks that executing
     * on an unknown volume throws, and verifies a clean shutdown with every
     * task accounted for.
     */
    @Test public void testAsyncDiskService() throws Throwable {
      final String[] vols=new String[]{"/0","/1"};
      final AsyncDiskService service=new AsyncDiskService(vols);
      final int total=100;
      // Alternate tasks across the two volumes.
      for (int i=0; i < total; i++) {
        service.execute(vols[i % 2],new ExampleTask());
      }
      Exception e=null;
      try {
        service.execute("no_such_volume",new ExampleTask());
      } catch ( RuntimeException ex) {
        e=ex;
      }
      assertNotNull("Executing a task on a non-existing volume should throw an " + "Exception.",e);
      service.shutdown();
      if (!service.awaitTermination(5000)) {
        fail("AsyncDiskService didn't shutdown in 5 seconds.");
      }
      // Every submitted task must have run and incremented the shared count.
      assertEquals(total,count);
    }

    Class: org.apache.hadoop.util.TestClasspath

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * "--glob" must print the JVM's java.class.path to stdout verbatim and
     * write nothing to stderr.
     */
    @Test public void testGlob(){
      Classpath.main(new String[]{"--glob"});
      final String printed=new String(stdout.toByteArray(),UTF8);
      assertEquals(System.getProperty("java.class.path"),printed.trim());
      assertTrue(stderr.toByteArray().length == 0);
    }

    Class: org.apache.hadoop.util.TestDataChecksum

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * DataChecksum equality: same type and chunk size are equal; differing
     * chunk size or differing checksum type breaks equality.
     */
    @Test public void testEquality(){
      final DataChecksum crc32Of512=DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,512);
      assertEquals(crc32Of512,DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,512));
      // Different chunk size => not equal.
      assertFalse(crc32Of512.equals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,1024)));
      // Different checksum type => not equal.
      assertFalse(crc32Of512.equals(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C,512)));
    }

    EqualityVerifier 
    @Test public void testToString(){ assertEquals("DataChecksum(type=CRC32, chunkSize=512)",DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,512).toString()); }

    Class: org.apache.hadoop.util.TestDirectBufferPool

    InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * A buffer returned to the pool must come back cleared: the same instance
     * is reused, but its position is reset so remaining() is full again.
     */
    @Test public void testBuffersAreReset(){
      final ByteBuffer first=pool.getBuffer(100);
      first.putInt(0xdeadbeef);          // consume 4 bytes
      assertEquals(96,first.remaining());
      pool.returnBuffer(first);
      final ByteBuffer second=pool.getBuffer(100);
      assertSame(first,second);          // pool recycles the same instance
      assertEquals(100,first.remaining());  // ...but reset to empty state
      pool.returnBuffer(second);
    }

    InternalCallVerifier EqualityVerifier 
    @Test public void testWeakRefClearing(){ List bufs=Lists.newLinkedList(); for (int i=0; i < 10; i++) { ByteBuffer buf=pool.getBuffer(100); bufs.add(buf); } for ( ByteBuffer buf : bufs) { pool.returnBuffer(buf); } assertEquals(10,pool.countBuffersOfSize(100)); bufs.clear(); bufs=null; for (int i=0; i < 3; i++) { System.gc(); } ByteBuffer buf=pool.getBuffer(100); assertEquals(0,pool.countBuffersOfSize(100)); pool.returnBuffer(buf); }

    InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * Basic pool behavior: a fresh buffer has the requested capacity, a
     * returned buffer is handed back on the next request, and two live
     * requests get distinct instances.
     */
    @Test public void testBasics(){
      final ByteBuffer first=pool.getBuffer(100);
      assertEquals(100,first.capacity());
      assertEquals(100,first.remaining());
      pool.returnBuffer(first);
      final ByteBuffer second=pool.getBuffer(100);
      assertSame(first,second);        // recycled instance
      final ByteBuffer third=pool.getBuffer(100);
      assertNotSame(second,third);     // pool was empty, so a new one
      pool.returnBuffer(second);
      pool.returnBuffer(third);
    }

    Class: org.apache.hadoop.util.TestHostsFileReader

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Include/exclude files containing only comment lines must yield empty
     * host sets.
     *
     * Fix: the FileWriters were leaked if write() threw before close();
     * try-with-resources now guarantees they are closed.
     */
    @Test public void testHostFileReaderWithCommentsOnly() throws Exception {
      try (FileWriter efw=new FileWriter(excludesFile);
           FileWriter ifw=new FileWriter(includesFile)) {
        efw.write("#DFS-Hosts-excluded\n");
        ifw.write("#Hosts-in-DFS\n");
      }
      HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
      int includesLen=hfp.getHosts().size();
      int excludesLen=hfp.getExcludedHosts().size();
      // Comment-only files contribute no hosts at all.
      assertEquals(0,includesLen);
      assertEquals(0,excludesLen);
      assertFalse(hfp.getHosts().contains("somehost5"));
      assertFalse(hfp.getExcludedHosts().contains("somehost5"));
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Host files split entries on arbitrary whitespace (spaces, tabs,
     * newlines) and a '#' starts a comment that runs to end of line, so the
     * trailing "# somehost5" entries are ignored.
     */
    @Test public void testHostFileReaderWithTabs() throws Exception {
      final FileWriter excludesWriter=new FileWriter(excludesFile);
      final FileWriter includesWriter=new FileWriter(includesFile);
      excludesWriter.write("#DFS-Hosts-excluded\n");
      excludesWriter.write("  \n");
      excludesWriter.write("   somehost \t somehost2 \n somehost4");
      excludesWriter.write("   somehost3 \t # somehost5");
      excludesWriter.close();
      includesWriter.write("#Hosts-in-DFS\n");
      includesWriter.write("     \n");
      includesWriter.write("   somehost \t  somehost2 \n somehost4");
      includesWriter.write("   somehost3 \t # somehost5");
      includesWriter.close();
      final HostsFileReader reader=new HostsFileReader(includesFile,excludesFile);
      // Four real hosts per file; the commented-out somehost5 is dropped.
      assertEquals(4,reader.getHosts().size());
      assertEquals(4,reader.getExcludedHosts().size());
      assertTrue(reader.getHosts().contains("somehost2"));
      assertFalse(reader.getHosts().contains("somehost5"));
      assertTrue(reader.getExcludedHosts().contains("somehost2"));
      assertFalse(reader.getExcludedHosts().contains("somehost5"));
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Space-separated host entries are all recognized, and everything after a
     * '#' on the same line is treated as a comment.
     */
    @Test public void testHostFileReaderWithSpaces() throws Exception {
      final FileWriter excludesWriter=new FileWriter(excludesFile);
      final FileWriter includesWriter=new FileWriter(includesFile);
      excludesWriter.write("#DFS-Hosts-excluded\n");
      excludesWriter.write("   somehost somehost2");
      excludesWriter.write("   somehost3 # somehost4");
      excludesWriter.close();
      includesWriter.write("#Hosts-in-DFS\n");
      includesWriter.write("   somehost somehost2");
      includesWriter.write("   somehost3 # somehost4");
      includesWriter.close();
      final HostsFileReader reader=new HostsFileReader(includesFile,excludesFile);
      // Three real hosts per file; somehost4 sits behind a comment marker.
      assertEquals(3,reader.getHosts().size());
      assertEquals(3,reader.getExcludedHosts().size());
      assertTrue(reader.getHosts().contains("somehost3"));
      assertFalse(reader.getHosts().contains("somehost5"));
      assertFalse(reader.getHosts().contains("somehost4"));
      assertTrue(reader.getExcludedHosts().contains("somehost3"));
      assertFalse(reader.getExcludedHosts().contains("somehost5"));
      assertFalse(reader.getExcludedHosts().contains("somehost4"));
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** Completely empty include/exclude files must produce empty host sets. */
    @Test public void testHostFileReaderWithNull() throws Exception {
      final FileWriter excludesWriter=new FileWriter(excludesFile);
      final FileWriter includesWriter=new FileWriter(includesFile);
      // Close immediately: both files are left zero-length.
      excludesWriter.close();
      includesWriter.close();
      final HostsFileReader reader=new HostsFileReader(includesFile,excludesFile);
      assertEquals(0,reader.getHosts().size());
      assertEquals(0,reader.getExcludedHosts().size());
      assertFalse(reader.getHosts().contains("somehost5"));
      assertFalse(reader.getExcludedHosts().contains("somehost5"));
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * General host-file parsing: one host per line, inline '#' comments,
     * standalone comment lines, duplicates collapsed, and multiple hosts on
     * one line all recognized.
     *
     * Fix: the FileWriters were leaked if write() threw before close();
     * try-with-resources now guarantees they are closed.
     */
    @Test public void testHostsFileReader() throws Exception {
      try (FileWriter efw=new FileWriter(excludesFile);
           FileWriter ifw=new FileWriter(includesFile)) {
        efw.write("#DFS-Hosts-excluded\n");
        efw.write("somehost1\n");
        efw.write("#This-is-comment\n");
        efw.write("somehost2\n");
        efw.write("somehost3 # host3\n");
        efw.write("somehost4\n");
        efw.write("somehost4 somehost5\n");
        ifw.write("#Hosts-in-DFS\n");
        ifw.write("somehost1\n");
        ifw.write("somehost2\n");
        ifw.write("somehost3\n");
        ifw.write("#This-is-comment\n");
        ifw.write("somehost4 # host4\n");
        ifw.write("somehost4 somehost5\n");
      }
      HostsFileReader hfp=new HostsFileReader(includesFile,excludesFile);
      int includesLen=hfp.getHosts().size();
      int excludesLen=hfp.getExcludedHosts().size();
      // somehost4 appears twice but counts once; comments never become hosts.
      assertEquals(5,includesLen);
      assertEquals(5,excludesLen);
      assertTrue(hfp.getHosts().contains("somehost5"));
      assertFalse(hfp.getHosts().contains("host3"));
      assertTrue(hfp.getExcludedHosts().contains("somehost5"));
      assertFalse(hfp.getExcludedHosts().contains("host4"));
    }

    Class: org.apache.hadoop.util.TestIdentityHashStore

    IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Inserts 1000 keys into an IdentityHashStore, verifies they are all
     * visible via visitAll, removes each one by identity, and checks the
     * store ends up empty while its internal capacity has grown to 1024
     * (next power of two above 1000).
     */
    @Test(timeout=60000) public void testAdditionsAndRemovals(){
      // Capacity 0: the store must grow on demand.
      IdentityHashStore store=new IdentityHashStore(0);
      final int NUM_KEYS=1000;
      LOG.debug("generating " + NUM_KEYS + " keys");
      final List keys=new ArrayList(NUM_KEYS);
      for (int i=0; i < NUM_KEYS; i++) {
        keys.add(new Key("key " + i));
      }
      for (int i=0; i < NUM_KEYS; i++) {
        store.put(keys.get(i),i);
      }
      // Every visited key must be one we inserted.
      store.visitAll(new Visitor(){
        @Override public void accept( Key k, Integer v){
          Assert.assertTrue(keys.contains(k));
        }
      } );
      // Removal by identity returns the value stored for that exact key object.
      for (int i=0; i < NUM_KEYS; i++) {
        Assert.assertEquals(Integer.valueOf(i),store.remove(keys.get(i)));
      }
      // After removing everything, visitAll must see nothing.
      store.visitAll(new Visitor(){
        @Override public void accept( Key k, Integer v){
          Assert.fail("expected all entries to be removed");
        }
      } );
      Assert.assertTrue("expected the store to be " + "empty, but found " + store.numElements() + " elements.",store.isEmpty());
      // Capacity grew to the power of two covering 1000 entries.
      Assert.assertEquals(1024,store.capacity());
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A store created with zero capacity starts empty, accepts one mapping,
     * exposes it via get/visitAll, and becomes empty again after removal.
     */
    @Test(timeout=60000) public void testStartingWithZeroCapacity(){
      final IdentityHashStore store=new IdentityHashStore(0);
      // Nothing may be visited in a brand-new store.
      store.visitAll(new Visitor(){
        @Override public void accept( Key k, Integer v){
          Assert.fail("found key " + k + " in empty IdentityHashStore.");
        }
      } );
      Assert.assertTrue(store.isEmpty());
      final Key key1=new Key("key1");
      final Integer value1=new Integer(100);
      store.put(key1,value1);
      Assert.assertTrue(!store.isEmpty());
      Assert.assertEquals(value1,store.get(key1));
      // The sole visited entry must be the key we inserted.
      store.visitAll(new Visitor(){
        @Override public void accept( Key k, Integer v){
          Assert.assertEquals(key1,k);
        }
      } );
      Assert.assertEquals(value1,store.remove(key1));
      Assert.assertTrue(store.isEmpty());
    }

    IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * IdentityHashStore keys by reference identity, not equals(): an
     * equals-equal but distinct Key object misses, and putting the same key
     * object three times stores three independent entries that are removed
     * one at a time.
     */
    @Test(timeout=60000) public void testDuplicateInserts(){
      IdentityHashStore store=new IdentityHashStore(4);
      store.visitAll(new Visitor(){
        @Override public void accept( Key k, Integer v){
          Assert.fail("found key " + k + " in empty IdentityHashStore.");
        }
      } );
      Assert.assertTrue(store.isEmpty());
      Key key1=new Key("key1");
      Integer value1=new Integer(100);
      Integer value2=new Integer(200);
      Integer value3=new Integer(300);
      store.put(key1,value1);
      // A distinct object that equals() key1 must NOT be found: identity only.
      Key equalToKey1=new Key("key1");
      Assert.assertNull(store.get(equalToKey1));
      Assert.assertTrue(!store.isEmpty());
      Assert.assertEquals(value1,store.get(key1));
      // Re-putting the same key object adds entries rather than replacing.
      store.put(key1,value2);
      store.put(key1,value3);
      final List allValues=new LinkedList();
      store.visitAll(new Visitor(){
        @Override public void accept( Key k, Integer v){
          allValues.add(v);
        }
      } );
      Assert.assertEquals(3,allValues.size());
      // Each remove() pops one of the three stored values (order unspecified).
      for (int i=0; i < 3; i++) {
        Integer value=store.remove(key1);
        Assert.assertTrue(allValues.remove(value));
      }
      // Fourth removal finds nothing left for this key.
      Assert.assertNull(store.remove(key1));
      Assert.assertTrue(store.isEmpty());
    }

    Class: org.apache.hadoop.util.TestLightWeightGSet

    InternalCallVerifier EqualityVerifier 
    /**
     * Removing every element through Iterator.remove() must leave the
     * LightWeightGSet empty.
     */
    @Test(timeout=60000) public void testRemoveAllViaIterator(){
      final ArrayList list=getRandomList(100,123);
      final LightWeightGSet set=new LightWeightGSet(16);
      for ( Integer i : list) {
        set.put(new TestElement(i));
      }
      // Drain the set via its own iterator (the only safe in-iteration removal).
      final Iterator iter=set.iterator();
      while (iter.hasNext()) {
        iter.next();
        iter.remove();
      }
      Assert.assertEquals(0,set.size());
    }

    Class: org.apache.hadoop.util.TestLineReader

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * LineReader with a custom delimiter: first exercises a token that
     * straddles the internal 64KB buffer boundary, then a multi-character
     * delimiter ("record") with empty records, back-to-back delimiters, and
     * near-miss suffixes ("ecord", "recor", "core").
     *
     * NOTE(review): Delimiter is assigned the empty string below, yet the
     * test builds an "Expected" value by stripping it and reads two distinct
     * lines — which only makes sense with a non-empty delimiter. This looks
     * like the original delimiter literal was lost in extraction (e.g. a
     * markup-like token); confirm against the upstream source before editing.
     */
    @Test public void testCustomDelimiter() throws Exception {
      Delimiter="";
      String CurrentBufferTailToken="GeleshOmathil";
      String Expected=(CurrentBufferTailToken + NextBufferHeadToken).replace(Delimiter,"");
      String TestPartOfInput=CurrentBufferTailToken + NextBufferHeadToken;
      // Fill the reader's buffer so the delimiter lands exactly on its edge.
      int BufferSize=64 * 1024;
      int numberOfCharToFillTheBuffer=BufferSize - CurrentBufferTailToken.length();
      StringBuilder fillerString=new StringBuilder();
      for (int i=0; i < numberOfCharToFillTheBuffer; i++) {
        fillerString.append('a');
      }
      TestData=fillerString + TestPartOfInput;
      lineReader=new LineReader(new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
      line=new Text();
      lineReader.readLine(line);
      Assert.assertEquals(fillerString.toString(),line.toString());
      lineReader.readLine(line);
      Assert.assertEquals(Expected,line.toString());
      // Second phase: a multi-character delimiter with tricky record shapes.
      Delimiter="record";
      StringBuilder TestStringBuilder=new StringBuilder();
      TestStringBuilder.append(Delimiter + "Kerala ");
      TestStringBuilder.append(Delimiter + "Bangalore");
      TestStringBuilder.append(Delimiter + " North Korea");
      TestStringBuilder.append(Delimiter + Delimiter + "Guantanamo");
      // Trailing near-delimiter fragments must NOT be treated as delimiters.
      TestStringBuilder.append(Delimiter + "ecord" + "recor"+ "core");
      TestData=TestStringBuilder.toString();
      lineReader=new LineReader(new ByteArrayInputStream(TestData.getBytes()),Delimiter.getBytes());
      // Leading delimiter produces an initial empty record.
      lineReader.readLine(line);
      Assert.assertEquals("",line.toString());
      lineReader.readLine(line);
      Assert.assertEquals("Kerala ",line.toString());
      lineReader.readLine(line);
      Assert.assertEquals("Bangalore",line.toString());
      lineReader.readLine(line);
      Assert.assertEquals(" North Korea",line.toString());
      // Two consecutive delimiters yield an empty record between them.
      lineReader.readLine(line);
      Assert.assertEquals("",line.toString());
      lineReader.readLine(line);
      Assert.assertEquals("Guantanamo",line.toString());
      lineReader.readLine(line);
      Assert.assertEquals(("ecord" + "recor" + "core"),line.toString());
    }

    Class: org.apache.hadoop.util.TestMachineList

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * getCollection must return every trimmed entry from the mixed
     * hostname/IP/CIDR source list, and nothing else.
     */
    @Test public void testGetCollection(){
      final MachineList machines=new MachineList(HOSTNAME_IP_CIDR_LIST);
      final Collection entries=machines.getCollection();
      assertEquals(7,machines.getCollection().size());
      // Each trimmed token of the source list must be present verbatim.
      for ( String token : StringUtils.getTrimmedStringCollection(HOSTNAME_IP_CIDR_LIST)) {
        assertTrue(entries.contains(token));
      }
    }

    Class: org.apache.hadoop.util.TestOptions

    EqualityVerifier 
    @Test public void testFind() throws Exception { Object[] opts=new Object[]{1,"hi",true,"bye",'x'}; assertEquals(1,Options.getOption(Integer.class,opts).intValue()); assertEquals("hi",Options.getOption(String.class,opts)); assertEquals(true,Options.getOption(Boolean.class,opts).booleanValue()); }

    EqualityVerifier 
    @Test public void testAppend() throws Exception { assertArrayEquals("first append",new String[]{"Dr.","Who","hi","there"},Options.prependOptions(new String[]{"hi","there"},"Dr.","Who")); assertArrayEquals("second append",new String[]{"aa","bb","cc","dd","ee","ff"},Options.prependOptions(new String[]{"dd","ee","ff"},"aa","bb","cc")); }

    Class: org.apache.hadoop.util.TestProgress

    InternalCallVerifier EqualityVerifier 
    @Test public void testSet(){ Progress progress=new Progress(); progress.set(Float.NaN); Assert.assertEquals(0,progress.getProgress(),0.0); progress.set(Float.NEGATIVE_INFINITY); Assert.assertEquals(0,progress.getProgress(),0.0); progress.set(-1); Assert.assertEquals(0,progress.getProgress(),0.0); progress.set((float)1.1); Assert.assertEquals(1,progress.getProgress(),0.0); progress.set(Float.POSITIVE_INFINITY); Assert.assertEquals(1,progress.getProgress(),0.0); }

    Class: org.apache.hadoop.util.TestReflectionUtils

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testCantCreate(){ try { ReflectionUtils.newInstance(NoDefaultCtor.class,null); fail("invalid call should fail"); } catch ( RuntimeException rte) { assertEquals(NoSuchMethodException.class,rte.getCause().getClass()); } }

    APIUtilityVerifier IterativeVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * ReflectionUtils' constructor cache must not pin classloaders: after
     * instantiating a class through many throwaway loaders and a GC, the
     * cache must hold fewer entries than iterations.
     */
    @SuppressWarnings("unchecked")
    @Test public void testCacheDoesntLeak() throws Exception {
      final int iterations=9999;
      for (int i=0; i < iterations; i++) {
        // Fresh loader each time so cached entries become collectable.
        final URLClassLoader loader=new URLClassLoader(new URL[0],getClass().getClassLoader());
        final Class cl=Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild",false,loader);
        final Object o=ReflectionUtils.newInstance(cl,null);
        assertEquals(cl,o.getClass());
      }
      System.gc();
      assertTrue(cacheSize() + " too big",cacheSize() < iterations);
    }

    EqualityVerifier 
    @Test public void testCache() throws Exception { assertEquals(0,cacheSize()); doTestCache(); assertEquals(toConstruct.length,cacheSize()); ReflectionUtils.clearCache(); assertEquals(0,cacheSize()); }

    Class: org.apache.hadoop.util.TestShutdownHookManager

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * ShutdownHookManager: hooks can be added, queried, removed and re-added,
     * and getShutdownHooksInOrder lists them by descending priority.
     */
    @Test public void shutdownHookManager(){
      final ShutdownHookManager manager=ShutdownHookManager.get();
      Assert.assertNotNull(manager);
      Assert.assertEquals(0,manager.getShutdownHooksInOrder().size());
      final Runnable lowPriorityHook=new Runnable(){
        @Override public void run(){
        }
      } ;
      final Runnable highPriorityHook=new Runnable(){
        @Override public void run(){
        }
      } ;
      // Add, verify, remove, verify gone.
      manager.addShutdownHook(lowPriorityHook,0);
      Assert.assertTrue(manager.hasShutdownHook(lowPriorityHook));
      Assert.assertEquals(1,manager.getShutdownHooksInOrder().size());
      Assert.assertEquals(lowPriorityHook,manager.getShutdownHooksInOrder().get(0));
      manager.removeShutdownHook(lowPriorityHook);
      Assert.assertFalse(manager.hasShutdownHook(lowPriorityHook));
      // Re-adding behaves like the first add.
      manager.addShutdownHook(lowPriorityHook,0);
      Assert.assertTrue(manager.hasShutdownHook(lowPriorityHook));
      Assert.assertEquals(1,manager.getShutdownHooksInOrder().size());
      Assert.assertTrue(manager.hasShutdownHook(lowPriorityHook));
      Assert.assertEquals(1,manager.getShutdownHooksInOrder().size());
      // Higher-priority hook must be ordered before the lower-priority one.
      manager.addShutdownHook(highPriorityHook,1);
      Assert.assertTrue(manager.hasShutdownHook(lowPriorityHook));
      Assert.assertTrue(manager.hasShutdownHook(highPriorityHook));
      Assert.assertEquals(2,manager.getShutdownHooksInOrder().size());
      Assert.assertEquals(highPriorityHook,manager.getShutdownHooksInOrder().get(0));
      Assert.assertEquals(lowPriorityHook,manager.getShutdownHooksInOrder().get(1));
    }

    Class: org.apache.hadoop.util.TestShutdownThreadsHelper

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * shutdownThread must stop a running thread and report termination
     * consistently with Thread.isAlive().
     */
    @Test(timeout=3000) public void testShutdownThread(){
      final Thread worker=new Thread(sampleRunnable);
      worker.start();
      final boolean reported=ShutdownThreadsHelper.shutdownThread(worker);
      final boolean terminated=!worker.isAlive();
      // The helper's return value must agree with the thread's actual state.
      assertEquals("Incorrect return value",reported,terminated);
      assertTrue("Thread is not shutdown",terminated);
    }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * shutdownExecutorService must terminate a running pool and report
     * termination consistently with ExecutorService.isTerminated().
     */
    @Test public void testShutdownThreadPool() throws InterruptedException {
      final ScheduledThreadPoolExecutor pool=new ScheduledThreadPoolExecutor(1);
      pool.execute(sampleRunnable);
      final boolean reported=ShutdownThreadsHelper.shutdownExecutorService(pool);
      final boolean terminated=pool.isTerminated();
      // The helper's return value must agree with the pool's actual state.
      assertEquals("Incorrect return value",reported,terminated);
      assertTrue("ExecutorService is not shutdown",terminated);
    }

    Class: org.apache.hadoop.util.TestStringUtils

    APIUtilityVerifier EqualityVerifier 
    /**
     * getTrimmedStrings must split on commas, strip surrounding whitespace,
     * drop empty trailing entries, and map blank input to an empty array.
     */
    @Test(timeout=30000) public void testGetTrimmedStrings() throws Exception {
      final String compactDirList="/spindle1/hdfs,/spindle2/hdfs,/spindle3/hdfs";
      final String spacedDirList="/spindle1/hdfs, /spindle2/hdfs, /spindle3/hdfs";
      final String pathologicalDirList1=" /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs ";
      final String pathologicalDirList2=" /spindle1/hdfs , /spindle2/hdfs ,/spindle3/hdfs , ";
      final String emptyList1="";
      final String emptyList2="   ";
      final String[] expectedArray={"/spindle1/hdfs","/spindle2/hdfs","/spindle3/hdfs"};
      final String[] emptyArray={};
      // Every whitespace/comma variation yields the same trimmed triple.
      assertArrayEquals(expectedArray,StringUtils.getTrimmedStrings(compactDirList));
      assertArrayEquals(expectedArray,StringUtils.getTrimmedStrings(spacedDirList));
      assertArrayEquals(expectedArray,StringUtils.getTrimmedStrings(pathologicalDirList1));
      assertArrayEquals(expectedArray,StringUtils.getTrimmedStrings(pathologicalDirList2));
      // Empty and whitespace-only inputs produce no entries.
      assertArrayEquals(emptyArray,StringUtils.getTrimmedStrings(emptyList1));
      final String[] estring=StringUtils.getTrimmedStrings(emptyList2);
      assertArrayEquals(emptyArray,estring);
    }

    EqualityVerifier 
    @Test(timeout=30000) public void testSimpleHostName(){ assertEquals("Should return hostname when FQDN is specified","hadoop01",StringUtils.simpleHostname("hadoop01.domain.com")); assertEquals("Should return hostname when only hostname is specified","hadoop01",StringUtils.simpleHostname("hadoop01")); assertEquals("Should not truncate when IP address is passed","10.10.5.68",StringUtils.simpleHostname("10.10.5.68")); }

    EqualityVerifier 
    @Test(timeout=5000) public void testReplaceTokensWinEnvVars(){ Pattern pattern=StringUtils.WIN_ENV_VAR_PATTERN; Map replacements=new HashMap(); replacements.put("foo","zoo"); replacements.put("baz","zaz"); assertEquals("zoo",StringUtils.replaceTokens("%foo%",pattern,replacements)); assertEquals("zaz",StringUtils.replaceTokens("%baz%",pattern,replacements)); assertEquals("",StringUtils.replaceTokens("%bar%",pattern,replacements)); assertEquals("",StringUtils.replaceTokens("",pattern,replacements)); assertEquals("zoo__zaz",StringUtils.replaceTokens("%foo%_%bar%_%baz%",pattern,replacements)); assertEquals("begin zoo__zaz end",StringUtils.replaceTokens("begin %foo%_%bar%_%baz% end",pattern,replacements)); }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * getTrimmedStringCollection must drop empty entries and duplicates,
     * leaving each distinct trimmed token exactly once.
     */
    @Test public void testGetUniqueNonEmptyTrimmedStrings(){
      final String TO_SPLIT=",foo, bar,baz,,blah,blah,bar,";
      final Collection tokens=StringUtils.getTrimmedStringCollection(TO_SPLIT);
      // "blah" and "bar" each appear twice but count once; empties vanish.
      assertEquals(4,tokens.size());
      assertTrue(tokens.containsAll(Arrays.asList(new String[]{"foo","bar","baz","blah"})));
    }

    EqualityVerifier 
    @Test(timeout=30000) public void testCamelize(){ assertEquals("Map",StringUtils.camelize("MAP")); assertEquals("JobSetup",StringUtils.camelize("JOB_SETUP")); assertEquals("SomeStuff",StringUtils.camelize("some_stuff")); assertEquals("Aa",StringUtils.camelize("aA")); assertEquals("Bb",StringUtils.camelize("bB")); assertEquals("Cc",StringUtils.camelize("cC")); assertEquals("Dd",StringUtils.camelize("dD")); assertEquals("Ee",StringUtils.camelize("eE")); assertEquals("Ff",StringUtils.camelize("fF")); assertEquals("Gg",StringUtils.camelize("gG")); assertEquals("Hh",StringUtils.camelize("hH")); assertEquals("Ii",StringUtils.camelize("iI")); assertEquals("Jj",StringUtils.camelize("jJ")); assertEquals("Kk",StringUtils.camelize("kK")); assertEquals("Ll",StringUtils.camelize("lL")); assertEquals("Mm",StringUtils.camelize("mM")); assertEquals("Nn",StringUtils.camelize("nN")); assertEquals("Oo",StringUtils.camelize("oO")); assertEquals("Pp",StringUtils.camelize("pP")); assertEquals("Qq",StringUtils.camelize("qQ")); assertEquals("Rr",StringUtils.camelize("rR")); assertEquals("Ss",StringUtils.camelize("sS")); assertEquals("Tt",StringUtils.camelize("tT")); assertEquals("Uu",StringUtils.camelize("uU")); assertEquals("Vv",StringUtils.camelize("vV")); assertEquals("Ww",StringUtils.camelize("wW")); assertEquals("Xx",StringUtils.camelize("xX")); assertEquals("Yy",StringUtils.camelize("yY")); assertEquals("Zz",StringUtils.camelize("zZ")); }

    EqualityVerifier 
    @Test(timeout=30000) public void testEscapeString() throws Exception { assertEquals(NULL_STR,StringUtils.escapeString(NULL_STR)); assertEquals(EMPTY_STR,StringUtils.escapeString(EMPTY_STR)); assertEquals(STR_WO_SPECIAL_CHARS,StringUtils.escapeString(STR_WO_SPECIAL_CHARS)); assertEquals(ESCAPED_STR_WITH_COMMA,StringUtils.escapeString(STR_WITH_COMMA)); assertEquals(ESCAPED_STR_WITH_ESCAPE,StringUtils.escapeString(STR_WITH_ESCAPE)); assertEquals(ESCAPED_STR_WITH_BOTH2,StringUtils.escapeString(STR_WITH_BOTH2)); }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testUnescapeString() throws Exception { assertEquals(NULL_STR,StringUtils.unEscapeString(NULL_STR)); assertEquals(EMPTY_STR,StringUtils.unEscapeString(EMPTY_STR)); assertEquals(STR_WO_SPECIAL_CHARS,StringUtils.unEscapeString(STR_WO_SPECIAL_CHARS)); try { StringUtils.unEscapeString(STR_WITH_COMMA); fail("Should throw IllegalArgumentException"); } catch ( IllegalArgumentException e) { } assertEquals(STR_WITH_COMMA,StringUtils.unEscapeString(ESCAPED_STR_WITH_COMMA)); try { StringUtils.unEscapeString(STR_WITH_ESCAPE); fail("Should throw IllegalArgumentException"); } catch ( IllegalArgumentException e) { } assertEquals(STR_WITH_ESCAPE,StringUtils.unEscapeString(ESCAPED_STR_WITH_ESCAPE)); try { StringUtils.unEscapeString(STR_WITH_BOTH2); fail("Should throw IllegalArgumentException"); } catch ( IllegalArgumentException e) { } assertEquals(STR_WITH_BOTH2,StringUtils.unEscapeString(ESCAPED_STR_WITH_BOTH2)); }

    EqualityVerifier 
    /**
     * StringUtils.replaceTokens with SHELL_ENV_VAR_PATTERN substitutes each
     * $NAME token with its mapped value; tokens missing from the map are
     * replaced with the empty string.
     *
     * Fix: use parameterized Map/HashMap instead of raw types.
     */
    @Test(timeout=5000)
    public void testReplaceTokensShellEnvVars() {
        Pattern pattern = StringUtils.SHELL_ENV_VAR_PATTERN;
        Map<String, String> replacements = new HashMap<String, String>();
        replacements.put("FOO", "one");
        replacements.put("BAZ", "two");
        replacements.put("NUMBERS123", "one-two-three");
        replacements.put("UNDER_SCORES", "___");

        assertEquals("one", StringUtils.replaceTokens("$FOO", pattern, replacements));
        assertEquals("two", StringUtils.replaceTokens("$BAZ", pattern, replacements));
        // $BAR is not in the map, so it is replaced with "".
        assertEquals("", StringUtils.replaceTokens("$BAR", pattern, replacements));
        assertEquals("", StringUtils.replaceTokens("", pattern, replacements));
        assertEquals("one-two-three",
            StringUtils.replaceTokens("$NUMBERS123", pattern, replacements));
        assertEquals("___",
            StringUtils.replaceTokens("$UNDER_SCORES", pattern, replacements));
        assertEquals("//one//two//",
            StringUtils.replaceTokens("//$FOO/$BAR/$BAZ//", pattern, replacements));
    }

    EqualityVerifier 
    /**
     * StringUtils.join: concatenates elements with the given separator; an
     * empty collection joins to the empty string.
     *
     * Fix: use parameterized List/ArrayList instead of raw types.
     */
    @Test(timeout=30000)
    public void testJoin() {
        List<String> s = new ArrayList<String>();
        s.add("a");
        s.add("b");
        s.add("c");
        assertEquals("", StringUtils.join(":", s.subList(0, 0)));
        assertEquals("a", StringUtils.join(":", s.subList(0, 1)));
        assertEquals("a:b", StringUtils.join(":", s.subList(0, 2)));
        assertEquals("a:b:c", StringUtils.join(":", s.subList(0, 3)));
    }

    EqualityVerifier 
    /** StringUtils.split(s, '/') must agree with String.split("/") on these samples. */
    @Test(timeout=30000)
    public void testSimpleSplit() throws Exception {
        final String[] samples = {"a/b/c", "a/b/c////", "///a/b/c", "", "/", "////"};
        for (String sample : samples) {
            assertArrayEquals("Testing '" + sample + "'",
                sample.split("/"), StringUtils.split(sample, '/'));
        }
    }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * StringUtils.stringToURI wraps an invalid URI string in an
     * IllegalArgumentException carrying a fixed "Failed to create uri" message.
     */
    @Test(timeout=30000)
    public void testStringToURI() {
        String[] str = new String[]{"file://"};
        try {
            StringUtils.stringToURI(str);
            fail("Ignoring URISyntaxException while creating URI from string file://");
        } catch (IllegalArgumentException iae) {
            assertEquals("Failed to create uri for file://", iae.getMessage());
        }
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * StringUtils.split on the default separator (comma): null passes through,
     * empty segments are dropped, and escaped separators are not split on.
     */
    @Test(timeout=30000)
    public void testSplit() throws Exception {
        assertEquals(NULL_STR, StringUtils.split(NULL_STR));

        String[] parts = StringUtils.split(EMPTY_STR);
        assertEquals(0, parts.length);

        parts = StringUtils.split(",,");
        assertEquals(0, parts.length);

        parts = StringUtils.split(STR_WO_SPECIAL_CHARS);
        assertEquals(1, parts.length);
        assertEquals(STR_WO_SPECIAL_CHARS, parts[0]);

        parts = StringUtils.split(STR_WITH_COMMA);
        assertEquals(2, parts.length);
        assertEquals("A", parts[0]);
        assertEquals("B", parts[1]);

        // An escaped comma is not a separator.
        parts = StringUtils.split(ESCAPED_STR_WITH_COMMA);
        assertEquals(1, parts.length);
        assertEquals(ESCAPED_STR_WITH_COMMA, parts[0]);

        parts = StringUtils.split(STR_WITH_ESCAPE);
        assertEquals(1, parts.length);
        assertEquals(STR_WITH_ESCAPE, parts[0]);

        parts = StringUtils.split(STR_WITH_BOTH2);
        assertEquals(3, parts.length);
        assertEquals(EMPTY_STR, parts[0]);
        assertEquals("A\\,", parts[1]);
        assertEquals("B\\\\", parts[2]);

        parts = StringUtils.split(ESCAPED_STR_WITH_BOTH2);
        assertEquals(1, parts.length);
        assertEquals(ESCAPED_STR_WITH_BOTH2, parts[0]);
    }

    IterativeVerifier UtilityVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    // Exercises TraditionalBinaryPrefix conversions in both directions
    // (string2long and long2String) across all prefixes k/m/g/t/p/e, plus
    // StringUtils.byteDesc and StringUtils.formatPercent. Covers: signed values,
    // case-insensitive prefixes, Long overflow/underflow rejection, invalid
    // prefix rejection, and rounding at each decimal-place setting.
    // NOTE(review): the two error-message/expected string literals below appear
    // to span physical lines in this extracted view; presumably the original
    // source encodes those breaks as "\n" escapes — confirm against the
    // original file. Code is intentionally left byte-identical because the
    // assertions depend on exact formatting and rounding behavior.
    @Test(timeout=30000) public void testTraditionalBinaryPrefix() throws Exception { String[] symbol={"k","m","g","t","p","e"}; long m=1024; for ( String s : symbol) { assertEquals(0,string2long(0 + s)); assertEquals(m,string2long(1 + s)); m*=1024; } assertEquals(0L,string2long("0")); assertEquals(1024L,string2long("1k")); assertEquals(-1024L,string2long("-1k")); assertEquals(1259520L,string2long("1230K")); assertEquals(-1259520L,string2long("-1230K")); assertEquals(104857600L,string2long("100m")); assertEquals(-104857600L,string2long("-100M")); assertEquals(956703965184L,string2long("891g")); assertEquals(-956703965184L,string2long("-891G")); assertEquals(501377302265856L,string2long("456t")); assertEquals(-501377302265856L,string2long("-456T")); assertEquals(11258999068426240L,string2long("10p")); assertEquals(-11258999068426240L,string2long("-10P")); assertEquals(1152921504606846976L,string2long("1e")); assertEquals(-1152921504606846976L,string2long("-1E")); String tooLargeNumStr="10e"; try { string2long(tooLargeNumStr); fail("Test passed for a number " + tooLargeNumStr + " too large"); } catch ( IllegalArgumentException e) { assertEquals(tooLargeNumStr + " does not fit in a Long",e.getMessage()); } String tooSmallNumStr="-10e"; try { string2long(tooSmallNumStr); fail("Test passed for a number " + tooSmallNumStr + " too small"); } catch ( IllegalArgumentException e) { assertEquals(tooSmallNumStr + " does not fit in a Long",e.getMessage()); } String invalidFormatNumStr="10kb"; char invalidPrefix='b'; try { string2long(invalidFormatNumStr); fail("Test passed for a number " + invalidFormatNumStr + " has invalid format"); } catch ( IllegalArgumentException e) { assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"+ invalidFormatNumStr+ "'. 
Allowed prefixes are k, m, g, t, p, e(case insensitive)",e.getMessage()); } assertEquals("0",long2String(0,null,2)); for (int decimalPlace=0; decimalPlace < 2; decimalPlace++) { for (int n=1; n < TraditionalBinaryPrefix.KILO.value; n++) { assertEquals(n + "",long2String(n,null,decimalPlace)); assertEquals(-n + "",long2String(-n,null,decimalPlace)); } assertEquals("1 K",long2String(1L << 10,null,decimalPlace)); assertEquals("-1 K",long2String(-1L << 10,null,decimalPlace)); } assertEquals("8.00 E",long2String(Long.MAX_VALUE,null,2)); assertEquals("8.00 E",long2String(Long.MAX_VALUE - 1,null,2)); assertEquals("-8 E",long2String(Long.MIN_VALUE,null,2)); assertEquals("-8.00 E",long2String(Long.MIN_VALUE + 1,null,2)); final String[] zeros={" ",".0 ",".00 "}; for (int decimalPlace=0; decimalPlace < zeros.length; decimalPlace++) { final String trailingZeros=zeros[decimalPlace]; for (int e=11; e < Long.SIZE - 1; e++) { final TraditionalBinaryPrefix p=TraditionalBinaryPrefix.values()[e / 10 - 1]; { final long n=1L << e; final String expected=(n / p.value) + " " + p.symbol; assertEquals("n=" + n,expected,long2String(n,null,2)); } { final long n=(1L << e) + 1; final String expected=(n / p.value) + trailingZeros + p.symbol; assertEquals("n=" + n,expected,long2String(n,null,decimalPlace)); } { final long n=(1L << e) - 1; final String expected=((n + 1) / p.value) + trailingZeros + p.symbol; assertEquals("n=" + n,expected,long2String(n,null,decimalPlace)); } } } assertEquals("1.50 K",long2String(3L << 9,null,2)); assertEquals("1.5 K",long2String(3L << 9,null,1)); assertEquals("1.50 M",long2String(3L << 19,null,2)); assertEquals("2 M",long2String(3L << 19,null,0)); assertEquals("3 G",long2String(3L << 30,null,2)); assertEquals("0 B",StringUtils.byteDesc(0)); assertEquals("-100 B",StringUtils.byteDesc(-100)); assertEquals("1 KB",StringUtils.byteDesc(1024)); assertEquals("1.50 KB",StringUtils.byteDesc(3L << 9)); assertEquals("1.50 MB",StringUtils.byteDesc(3L << 19)); assertEquals("3 
GB",StringUtils.byteDesc(3L << 30)); assertEquals("10%",StringUtils.formatPercent(0.1,0)); assertEquals("10.0%",StringUtils.formatPercent(0.1,1)); assertEquals("10.00%",StringUtils.formatPercent(0.1,2)); assertEquals("1%",StringUtils.formatPercent(0.00543,0)); assertEquals("0.5%",StringUtils.formatPercent(0.00543,1)); assertEquals("0.54%",StringUtils.formatPercent(0.00543,2)); assertEquals("0.543%",StringUtils.formatPercent(0.00543,3)); assertEquals("0.5430%",StringUtils.formatPercent(0.00543,4)); }

    Class: org.apache.hadoop.util.TestVersionUtil

    EqualityVerifier 
    /**
     * VersionUtil.compareVersions: version strings that differ only in trailing
     * ".0" components or alpha-suffix spelling compare equal; otherwise ordering
     * is checked via assertExpectedValues (first argument strictly lower).
     */
    @Test
    public void testCompareVersions() {
        // Pairs that must compare as equal (result 0), in the original order.
        String[][] equalPairs = {
            {"2.0.0", "2.0.0"}, {"2.0.0a", "2.0.0a"},
            {"2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"},
            {"1", "1"}, {"1", "1.0"}, {"1", "1.0.0"},
            {"1.0", "1"}, {"1.0", "1.0"}, {"1.0", "1.0.0"},
            {"1.0.0", "1"}, {"1.0.0", "1.0"}, {"1.0.0", "1.0.0"},
            {"1.0.0-alpha-1", "1.0.0-a1"}, {"1.0.0-alpha-2", "1.0.0-a2"},
            {"1.0.0-alpha1", "1.0.0-alpha-1"},
            {"1a0", "1.0.0-alpha-0"}, {"1a0", "1-a0"},
            {"1.a0", "1-a0"}, {"1.a0", "1.0.0-alpha-0"},
        };
        for (String[] pair : equalPairs) {
            assertEquals(0, VersionUtil.compareVersions(pair[0], pair[1]));
        }

        // Pairs where the first version is strictly lower than the second,
        // in the original order.
        String[][] orderedPairs = {
            {"1", "2.0.0"}, {"1.0.0", "2"}, {"1.0.0", "2.0.0"},
            {"1.0", "2.0.0"}, {"1.0.0", "2.0.0"}, {"1.0.0", "1.0.0a"},
            {"1.0.0.0", "2.0.0"}, {"1.0.0", "1.0.0-dev"},
            {"1.0.0", "1.0.1"}, {"1.0.0", "1.0.2"}, {"1.0.0", "1.1.0"},
            {"2.0.0", "10.0.0"}, {"1.0.0", "1.0.0a"},
            {"1.0.2a", "1.0.10"}, {"1.0.2a", "1.0.2b"}, {"1.0.2a", "1.0.2ab"},
            {"1.0.0a1", "1.0.0a2"}, {"1.0.0a2", "1.0.0a10"},
            {"1.0", "1.a"}, {"1.a0", "1.0"}, {"1a0", "1.0"},
            {"1.0.1-alpha-1", "1.0.1-alpha-2"}, {"1.0.1-beta-1", "1.0.1-beta-2"},
            {"1.0-SNAPSHOT", "1.0"}, {"1.0.0-SNAPSHOT", "1.0"},
            {"1.0.0-SNAPSHOT", "1.0.0"}, {"1.0.0", "1.0.1-SNAPSHOT"},
            {"1.0.1-SNAPSHOT", "1.0.1"}, {"1.0.1-SNAPSHOT", "1.0.2"},
            {"1.0.1-alpha-1", "1.0.1-SNAPSHOT"},
            {"1.0.1-beta-1", "1.0.1-SNAPSHOT"},
            {"1.0.1-beta-2", "1.0.1-SNAPSHOT"},
        };
        for (String[] pair : orderedPairs) {
            assertExpectedValues(pair[0], pair[1]);
        }
    }

    Class: org.apache.hadoop.util.TestWinUtils

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * winutils "ls": the plain listing starts with the permission mask and ends
     * with the file path; "-F" produces a fixed 9-field "|"-separated record
     * whose 5th field is the file size.
     *
     * Fix: replaced assertTrue(x.equals(y)) with assertEquals so failures
     * report both the expected and the actual value.
     */
    @Test(timeout=30000)
    public void testLs() throws IOException {
        final String content = "6bytes";
        final int contentSize = content.length();
        File testFile = new File(TEST_DIR, "file1");
        writeFile(testFile, content);

        // Plain listing: first token is the permission mask, last is the path.
        String output = Shell.execCommand(Shell.WINUTILS, "ls", testFile.getCanonicalPath());
        String[] outputArgs = output.split("[ \r\n]");
        assertEquals("-rwx------", outputArgs[0]);
        assertEquals(testFile.getCanonicalPath(), outputArgs[outputArgs.length - 1]);

        // "-F": 9 "|"-separated fields; field 4 is the size in bytes.
        output = Shell.execCommand(Shell.WINUTILS, "ls", "-F", testFile.getCanonicalPath());
        outputArgs = output.split("[|\r\n]");
        assertEquals(9, outputArgs.length);
        assertEquals("-rwx------", outputArgs[0]);
        assertEquals(contentSize, Long.parseLong(outputArgs[4]));
        assertEquals(testFile.getCanonicalPath(), outputArgs[8]);

        testFile.delete();
        assertFalse(testFile.exists());
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * winutils "groups": with no user argument it reports the current user's
     * groups; "-F" yields the same groups separated by "|" instead of spaces.
     */
    @Test(timeout=30000)
    public void testGroups() throws IOException {
        String currentUser = System.getProperty("user.name");

        String outputNoArgs = Shell.execCommand(Shell.WINUTILS, "groups").trim();
        String output = Shell.execCommand(Shell.WINUTILS, "groups", currentUser).trim();
        assertEquals(output, outputNoArgs);

        // Normalizing "|" back to spaces must reproduce the default output.
        String outputFormat =
            Shell.execCommand(Shell.WINUTILS, "groups", "-F", currentUser).trim();
        outputFormat = outputFormat.replace("|", " ");
        assertEquals(output, outputFormat);
    }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Validate behavior of chmod commands on directories on Windows.
     *
     * Fixes: the original forced a failure with the anti-pattern
     * assertFalse("writeFile should have failed!", true) — replaced with
     * fail(...); and the expected-failure log message said mode "577" while
     * the mode actually set is "500".
     */
    @Test(timeout=30000)
    public void testBasicChmodOnDir() throws IOException {
        File a = new File(TEST_DIR, "a");
        File b = new File(a, "b");
        a.mkdirs();
        assertTrue(b.createNewFile());

        // Without read permission, listing the directory returns null.
        chmod("300", a);
        String[] files = a.list();
        assertTrue("Listing a directory without read permission should fail",
            null == files);
        chmod("700", a);
        files = a.list();
        assertEquals("b", files[0]);

        // Without write permission, creating a child file must fail.
        chmod("500", a);
        File c = new File(a, "c");
        try {
            c.createNewFile();
            fail("writeFile should have failed!");
        } catch (IOException ex) {
            LOG.info("Expected: Failed to create a file when directory "
                + "permissions are 500");
        }
        assertTrue("Special behavior: deleting a file will succeed on Windows "
            + "even if a user does not have write permissions on the parent dir",
            b.delete());
        assertFalse("Renaming a file should fail on the dir where a user does "
            + "not have write permissions", b.renameTo(new File(a, "d")));

        // With full permissions, create and rename succeed.
        chmod("700", a);
        assertTrue(c.createNewFile());
        File d = new File(a, "d");
        assertTrue(c.renameTo(d));

        // Read+write without execute still allows list, delete, create, rename.
        chmod("600", a);
        files = a.list();
        assertEquals("d", files[0]);
        assertTrue(d.delete());
        File e = new File(a, "e");
        assertTrue(e.createNewFile());
        assertTrue(e.renameTo(new File(a, "f")));
        chmod("700", a);
    }

    Class: org.apache.hadoop.util.TestZKUtil

    APIUtilityVerifier EqualityVerifier 
    /** ZKUtil.removeSpecificPerms must clear exactly the requested permission bit. */
    @Test
    public void testRemoveSpecificPerms() {
        int allPerms = Perms.ALL;
        int toRemove = Perms.CREATE;
        int remaining = ZKUtil.removeSpecificPerms(allPerms, toRemove);
        assertEquals("Removal failed", 0, remaining & Perms.CREATE);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * ZKUtil.parseAuth splits a comma/whitespace-separated "scheme:auth" list
     * into ZKAuthInfo entries; auth data after the first ':' is kept verbatim
     * (including further colons).
     *
     * Fix: use the parameterized List type (element type is ZKAuthInfo, as the
     * existing get(0) usage shows).
     */
    @Test
    public void testGoodAuths() {
        List<ZKAuthInfo> result = ZKUtil.parseAuth("scheme:data,\n scheme2:user:pass");
        assertEquals(2, result.size());
        ZKAuthInfo auth0 = result.get(0);
        assertEquals("scheme", auth0.getScheme());
        assertEquals("data", new String(auth0.getAuth()));
        ZKAuthInfo auth1 = result.get(1);
        assertEquals("scheme2", auth1.getScheme());
        // Everything after the first ':' is the auth payload.
        assertEquals("user:pass", new String(auth1.getAuth()));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * ZKUtil.parseACLs parses comma-separated "scheme:id:perms" entries into
     * ZooKeeper ACLs; "cdrwa" grants all five permission bits.
     *
     * Fixes: use the parameterized List type (element type is ACL, as the
     * existing get(0) usage shows) and assert the list size before indexing.
     */
    @Test
    public void testGoodACLs() {
        List<ACL> result = ZKUtil.parseACLs(
            "sasl:hdfs/host1@MY.DOMAIN:cdrwa, sasl:hdfs/host2@MY.DOMAIN:ca");
        assertEquals(2, result.size());

        ACL acl0 = result.get(0);
        // "cdrwa" = create + delete + read + write + admin.
        assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ | Perms.WRITE | Perms.ADMIN,
            acl0.getPerms());
        assertEquals("sasl", acl0.getId().getScheme());
        assertEquals("hdfs/host1@MY.DOMAIN", acl0.getId().getId());

        ACL acl1 = result.get(1);
        assertEquals(Perms.CREATE | Perms.ADMIN, acl1.getPerms());
        assertEquals("sasl", acl1.getId().getScheme());
        assertEquals("hdfs/host2@MY.DOMAIN", acl1.getId().getId());
    }

    UtilityVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * ZKUtil.resolveConfIndirection: null and literal values pass through;
     * "@path" loads the file's contents; a missing file throws
     * FileNotFoundException whose message starts with the path.
     */
    @Test
    public void testConfIndirection() throws IOException {
        assertNull(ZKUtil.resolveConfIndirection(null));
        assertEquals("x", ZKUtil.resolveConfIndirection("x"));

        TEST_FILE.getParentFile().mkdirs();
        Files.write("hello world", TEST_FILE, Charsets.UTF_8);
        assertEquals("hello world",
            ZKUtil.resolveConfIndirection("@" + TEST_FILE.getAbsolutePath()));

        try {
            ZKUtil.resolveConfIndirection("@" + BOGUS_FILE);
            fail("Did not throw for non-existent file reference");
        } catch (FileNotFoundException fnfe) {
            assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE));
        }
    }

    Class: org.apache.hadoop.yarn.TestRecordFactory

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * RecordFactoryPBImpl must instantiate the protobuf implementation classes
     * for AllocateResponse and AllocateRequest.
     *
     * Fix: corrected the "crete" typo in the failure messages.
     */
    @Test
    public void testPbRecordFactory() {
        RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
        try {
            AllocateResponse response =
                pbRecordFactory.newRecordInstance(AllocateResponse.class);
            Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
        } catch (YarnRuntimeException e) {
            e.printStackTrace();
            Assert.fail("Failed to create record");
        }
        try {
            AllocateRequest request =
                pbRecordFactory.newRecordInstance(AllocateRequest.class);
            Assert.assertEquals(AllocateRequestPBImpl.class, request.getClass());
        } catch (YarnRuntimeException e) {
            e.printStackTrace();
            Assert.fail("Failed to create record");
        }
    }

    Class: org.apache.hadoop.yarn.TestRpcFactoryProvider

    APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * RpcFactoryProvider: defaults resolve to the PB factory implementations;
     * unknown serializer names and non-existent classes fail with
     * YarnRuntimeException, while a valid configured class still loads.
     */
    @Test
    public void testFactoryProvider() {
        Configuration conf = new Configuration();
        RpcClientFactory clientFactory = null;
        RpcServerFactory serverFactory = null;

        // Defaults.
        clientFactory = RpcFactoryProvider.getClientFactory(conf);
        serverFactory = RpcFactoryProvider.getServerFactory(conf);
        Assert.assertEquals(RpcClientFactoryPBImpl.class, clientFactory.getClass());
        Assert.assertEquals(RpcServerFactoryPBImpl.class, serverFactory.getClass());

        // Unknown serializer names are rejected.
        conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS, "unknown");
        conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS, "unknown");
        conf.set(YarnConfiguration.IPC_RECORD_FACTORY_CLASS, "unknown");
        try {
            clientFactory = RpcFactoryProvider.getClientFactory(conf);
            Assert.fail("Expected an exception - unknown serializer");
        } catch (YarnRuntimeException e) {
            // expected
        }
        try {
            serverFactory = RpcFactoryProvider.getServerFactory(conf);
            Assert.fail("Expected an exception - unknown serializer");
        } catch (YarnRuntimeException e) {
            // expected
        }

        // Non-existent client class fails; the valid server class still loads.
        conf = new Configuration();
        conf.set(YarnConfiguration.IPC_CLIENT_FACTORY_CLASS, "NonExistantClass");
        conf.set(YarnConfiguration.IPC_SERVER_FACTORY_CLASS,
            RpcServerFactoryPBImpl.class.getName());
        try {
            clientFactory = RpcFactoryProvider.getClientFactory(conf);
            Assert.fail("Expected an exception - unknown class");
        } catch (YarnRuntimeException e) {
            // expected
        }
        try {
            serverFactory = RpcFactoryProvider.getServerFactory(conf);
        } catch (YarnRuntimeException e) {
            Assert.fail("Error while loading factory using reflection: ["
                + RpcServerFactoryPBImpl.class.getName() + "]");
        }
    }

    Class: org.apache.hadoop.yarn.TestYSCRecordFactory

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * RecordFactoryPBImpl must instantiate NodeHeartbeatRequestPBImpl for the
     * NodeHeartbeatRequest interface.
     *
     * Fix: corrected the "crete" typo in the failure message.
     */
    @Test
    public void testPbRecordFactory() {
        RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
        try {
            NodeHeartbeatRequest request =
                pbRecordFactory.newRecordInstance(NodeHeartbeatRequest.class);
            Assert.assertEquals(NodeHeartbeatRequestPBImpl.class, request.getClass());
        } catch (YarnRuntimeException e) {
            e.printStackTrace();
            Assert.fail("Failed to create record");
        }
    }

    Class: org.apache.hadoop.yarn.TestYarnServerApiClasses

    InternalCallVerifier EqualityVerifier 
    /** NodeStatusPBImpl: all fields survive a round trip through the proto form. */
    @Test
    public void testNodeStatusPBImpl() {
        NodeStatusPBImpl src = new NodeStatusPBImpl();
        src.setContainersStatuses(Arrays.asList(
            getContainerStatus(1, 2, 1), getContainerStatus(2, 3, 1)));
        src.setKeepAliveApplications(Arrays.asList(
            getApplicationId(3), getApplicationId(4)));
        src.setNodeHealthStatus(getNodeHealthStatus());
        src.setNodeId(getNodeId());
        src.setResponseId(1);

        NodeStatusPBImpl restored = new NodeStatusPBImpl(src.getProto());
        assertEquals(3, restored.getContainersStatuses().get(1).getContainerId().getId());
        assertEquals(3, restored.getKeepAliveApplications().get(0).getId());
        assertEquals(1000, restored.getNodeHealthStatus().getLastHealthReportTime());
        assertEquals(9090, restored.getNodeId().getPort());
        assertEquals(1, restored.getResponseId());
    }

    InternalCallVerifier EqualityVerifier 
    /** NodeHeartbeatResponsePBImpl: all fields survive a proto round trip. */
    @Test
    public void testNodeHeartbeatResponsePBImpl() {
        NodeHeartbeatResponsePBImpl src = new NodeHeartbeatResponsePBImpl();
        src.setDiagnosticsMessage("testDiagnosticMessage");
        src.setContainerTokenMasterKey(getMasterKey());
        src.setNMTokenMasterKey(getMasterKey());
        src.setNextHeartBeatInterval(1000);
        src.setNodeAction(NodeAction.NORMAL);
        src.setResponseId(100);

        NodeHeartbeatResponsePBImpl restored =
            new NodeHeartbeatResponsePBImpl(src.getProto());
        assertEquals(100, restored.getResponseId());
        assertEquals(NodeAction.NORMAL, restored.getNodeAction());
        assertEquals(1000, restored.getNextHeartBeatInterval());
        assertEquals(1, restored.getContainerTokenMasterKey().getKeyId());
        assertEquals(1, restored.getNMTokenMasterKey().getKeyId());
        assertEquals("testDiagnosticMessage", restored.getDiagnosticsMessage());
    }

    InternalCallVerifier EqualityVerifier 
    /** NodeHeartbeatRequestPBImpl: master keys and node status survive a proto round trip. */
    @Test
    public void testNodeHeartbeatRequestPBImpl() {
        NodeHeartbeatRequestPBImpl src = new NodeHeartbeatRequestPBImpl();
        src.setLastKnownContainerTokenMasterKey(getMasterKey());
        src.setLastKnownNMTokenMasterKey(getMasterKey());
        src.setNodeStatus(getNodeStatus());

        NodeHeartbeatRequestPBImpl restored =
            new NodeHeartbeatRequestPBImpl(src.getProto());
        assertEquals(1, restored.getLastKnownContainerTokenMasterKey().getKeyId());
        assertEquals(1, restored.getLastKnownNMTokenMasterKey().getKeyId());
        assertEquals("localhost", restored.getNodeStatus().getNodeId().getHost());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * RegisterNodeManagerResponsePBImpl: values set through the setters are
     * recoverable from an instance rebuilt from the generated prototype.
     */
    @Test
    public void testRegisterNodeManagerResponsePBImpl() {
        RegisterNodeManagerResponsePBImpl src = new RegisterNodeManagerResponsePBImpl();
        src.setContainerTokenMasterKey(getMasterKey());
        src.setNMTokenMasterKey(getMasterKey());
        src.setNodeAction(NodeAction.NORMAL);
        src.setDiagnosticsMessage("testDiagnosticMessage");

        RegisterNodeManagerResponsePBImpl restored =
            new RegisterNodeManagerResponsePBImpl(src.getProto());
        assertEquals(1, restored.getContainerTokenMasterKey().getKeyId());
        assertEquals(1, restored.getNMTokenMasterKey().getKeyId());
        assertEquals(NodeAction.NORMAL, restored.getNodeAction());
        assertEquals("testDiagnosticMessage", restored.getDiagnosticsMessage());
    }

    InternalCallVerifier EqualityVerifier 
    /** RegisterNodeManagerRequestPBImpl: port, node id and resource survive a round trip. */
    @Test
    public void testRegisterNodeManagerRequestPBImpl() {
        RegisterNodeManagerRequestPBImpl src = new RegisterNodeManagerRequestPBImpl();
        src.setHttpPort(8080);
        src.setNodeId(getNodeId());
        Resource resource = recordFactory.newRecordInstance(Resource.class);
        resource.setMemory(10000);
        resource.setVirtualCores(2);
        src.setResource(resource);

        RegisterNodeManagerRequestPBImpl restored =
            new RegisterNodeManagerRequestPBImpl(src.getProto());
        assertEquals(8080, restored.getHttpPort());
        assertEquals(9090, restored.getNodeId().getPort());
        assertEquals(10000, restored.getResource().getMemory());
        assertEquals(2, restored.getResource().getVirtualCores());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** MasterKeyPBImpl: a proto round-tripped copy is equal to the source (equals/hashCode). */
    @Test
    public void testMasterKeyPBImpl() {
        MasterKeyPBImpl src = new MasterKeyPBImpl();
        src.setBytes(ByteBuffer.allocate(0));
        src.setKeyId(1);

        MasterKeyPBImpl restored = new MasterKeyPBImpl(src.getProto());
        assertEquals(1, restored.getKeyId());
        assertTrue(src.equals(restored));
        assertEquals(src.hashCode(), restored.hashCode());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** SerializedExceptionPBImpl: message and cause chain survive a proto round trip. */
    @Test
    public void testSerializedExceptionPBImpl() {
        SerializedExceptionPBImpl src = new SerializedExceptionPBImpl();
        src.init("testMessage");
        SerializedExceptionPBImpl restored =
            new SerializedExceptionPBImpl(src.getProto());
        assertEquals("testMessage", restored.getMessage());

        // With a nested cause, the cause message and remote trace are preserved.
        src = new SerializedExceptionPBImpl();
        src.init("testMessage", new Throwable(new Throwable("parent")));
        restored = new SerializedExceptionPBImpl(src.getProto());
        assertEquals("testMessage", restored.getMessage());
        assertEquals("parent", restored.getCause().getMessage());
        assertTrue(restored.getRemoteTrace().startsWith(
            "java.lang.Throwable: java.lang.Throwable: parent"));
    }

    Class: org.apache.hadoop.yarn.api.TestAllocateRequest

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * AllocateRequest with increase requests survives a protobuf round trip.
     *
     * Fixes: raw List types replaced with the parameterized form, and the
     * swapped expected/actual argument order in the per-element assertEquals
     * corrected (expected value first).
     */
    @Test
    public void testAllcoateRequestWithIncrease() {
        List<ContainerResourceIncreaseRequest> incRequests =
            new ArrayList<ContainerResourceIncreaseRequest>();
        for (int i = 0; i < 3; i++) {
            incRequests.add(ContainerResourceIncreaseRequest.newInstance(null,
                Resource.newInstance(0, i)));
        }
        AllocateRequest r =
            AllocateRequest.newInstance(123, 0f, null, null, null, incRequests);

        // Round-trip through the protobuf representation.
        AllocateRequestProto p = ((AllocateRequestPBImpl) r).getProto();
        r = new AllocateRequestPBImpl(p);
        Assert.assertEquals(123, r.getResponseId());
        Assert.assertEquals(incRequests.size(), r.getIncreaseRequests().size());
        for (int i = 0; i < incRequests.size(); i++) {
            Assert.assertEquals(incRequests.get(i).getCapability().getVirtualCores(),
                r.getIncreaseRequests().get(i).getCapability().getVirtualCores());
        }
    }

    InternalCallVerifier EqualityVerifier 
    /** AllocateRequest built with a null increase list round-trips to an empty list. */
    @Test
    public void testAllcoateRequestWithoutIncrease() {
        AllocateRequest request =
            AllocateRequest.newInstance(123, 0f, null, null, null, null);
        AllocateRequestProto proto = ((AllocateRequestPBImpl) request).getProto();
        request = new AllocateRequestPBImpl(proto);
        Assert.assertEquals(123, request.getResponseId());
        Assert.assertEquals(0, request.getIncreaseRequests().size());
    }

    Class: org.apache.hadoop.yarn.api.TestAllocateResponse

    InternalCallVerifier EqualityVerifier 
    /** AllocateResponse with null inc/dec container lists round-trips to empty lists. */
    @Test
    public void testAllocateResponseWithoutIncDecContainers() {
        AllocateResponse response = AllocateResponse.newInstance(3,
            new ArrayList(), new ArrayList(), new ArrayList(), null,
            AMCommand.AM_RESYNC, 3, null, new ArrayList(), null, null);

        AllocateResponseProto proto = ((AllocateResponsePBImpl) response).getProto();
        response = new AllocateResponsePBImpl(proto);
        Assert.assertEquals(0, response.getIncreasedContainers().size());
        Assert.assertEquals(0, response.getDecreasedContainers().size());
    }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * AllocateResponse retains increased (3) and decreased (5) container lists,
     * including each element's virtual-core count, across a proto round trip.
     */
    @Test
    public void testAllocateResponseWithIncDecContainers() {
        List incContainers = new ArrayList();
        List decContainers = new ArrayList();
        for (int i = 0; i < 3; i++) {
            incContainers.add(ContainerResourceIncrease.newInstance(null,
                Resource.newInstance(1024, i), null));
        }
        for (int i = 0; i < 5; i++) {
            decContainers.add(ContainerResourceDecrease.newInstance(null,
                Resource.newInstance(1024, i)));
        }
        AllocateResponse response = AllocateResponse.newInstance(3,
            new ArrayList(), new ArrayList(), new ArrayList(), null,
            AMCommand.AM_RESYNC, 3, null, new ArrayList(),
            incContainers, decContainers);

        AllocateResponseProto proto = ((AllocateResponsePBImpl) response).getProto();
        response = new AllocateResponsePBImpl(proto);
        Assert.assertEquals(incContainers.size(),
            response.getIncreasedContainers().size());
        Assert.assertEquals(decContainers.size(),
            response.getDecreasedContainers().size());
        // Element i was built with i virtual cores in both lists.
        for (int i = 0; i < incContainers.size(); i++) {
            Assert.assertEquals(i, response.getIncreasedContainers().get(i)
                .getCapability().getVirtualCores());
        }
        for (int i = 0; i < decContainers.size(); i++) {
            Assert.assertEquals(i, response.getDecreasedContainers().get(i)
                .getCapability().getVirtualCores());
        }
    }

    Class: org.apache.hadoop.yarn.api.TestApplicationAttemptId

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** ApplicationAttemptId: equals/compareTo/hashCode consistency and toString format. */
    @Test
    public void testApplicationAttemptId() {
        ApplicationAttemptId a1 = createAppAttemptId(10l, 1, 1);
        ApplicationAttemptId a2 = createAppAttemptId(10l, 1, 2);
        ApplicationAttemptId a3 = createAppAttemptId(10l, 2, 1);
        ApplicationAttemptId a4 = createAppAttemptId(8l, 1, 4);
        ApplicationAttemptId a5 = createAppAttemptId(10l, 1, 1);

        // equals: only the identical triple matches.
        Assert.assertTrue(a1.equals(a5));
        Assert.assertFalse(a1.equals(a2));
        Assert.assertFalse(a1.equals(a3));
        Assert.assertFalse(a1.equals(a4));

        // compareTo is consistent with equals on these samples.
        Assert.assertTrue(a1.compareTo(a5) == 0);
        Assert.assertTrue(a1.compareTo(a2) < 0);
        Assert.assertTrue(a1.compareTo(a3) < 0);
        Assert.assertTrue(a1.compareTo(a4) > 0);

        // hashCode agrees with equals on these samples.
        Assert.assertTrue(a1.hashCode() == a5.hashCode());
        Assert.assertFalse(a1.hashCode() == a2.hashCode());
        Assert.assertFalse(a1.hashCode() == a3.hashCode());
        Assert.assertFalse(a1.hashCode() == a4.hashCode());

        long ts = System.currentTimeMillis();
        ApplicationAttemptId a6 = createAppAttemptId(ts, 543627, 33492611);
        Assert.assertEquals("appattempt_10_0001_000001", a1.toString());
        Assert.assertEquals("appattempt_" + ts + "_543627_33492611", a6.toString());
    }

    Class: org.apache.hadoop.yarn.api.TestApplicationId

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** ApplicationId: equals/compareTo/hashCode consistency and toString format. */
    @Test
    public void testApplicationId() {
        ApplicationId a1 = ApplicationId.newInstance(10l, 1);
        ApplicationId a2 = ApplicationId.newInstance(10l, 2);
        ApplicationId a3 = ApplicationId.newInstance(10l, 1);
        ApplicationId a4 = ApplicationId.newInstance(8l, 3);

        Assert.assertFalse(a1.equals(a2));
        Assert.assertFalse(a1.equals(a4));
        Assert.assertTrue(a1.equals(a3));

        Assert.assertTrue(a1.compareTo(a2) < 0);
        Assert.assertTrue(a1.compareTo(a3) == 0);
        Assert.assertTrue(a1.compareTo(a4) > 0);

        Assert.assertTrue(a1.hashCode() == a3.hashCode());
        Assert.assertFalse(a1.hashCode() == a2.hashCode());
        Assert.assertFalse(a2.hashCode() == a4.hashCode());

        long ts = System.currentTimeMillis();
        ApplicationId a5 = ApplicationId.newInstance(ts, 45436343);
        Assert.assertEquals("application_10_0001", a1.toString());
        Assert.assertEquals("application_" + ts + "_45436343", a5.toString());
    }

    Class: org.apache.hadoop.yarn.api.TestApplicatonReport

    APIUtilityVerifier InternalCallVerifier IdentityVerifier NullVerifier EqualityVerifier HybridVerifier 
    // Builds three ApplicationReports from identical inputs, checks they are
    // equal, then nulls out ids on individual copies and re-checks.
    // NOTE(review): Assert.assertNotSame only checks reference identity, and
    // appReport1/2/3 are always distinct objects, so those two assertions pass
    // regardless of the nulled fields. assertNotEquals appears to have been the
    // intent — confirm the reports' equals() semantics with a null id before
    // changing, since that would alter whether the test can fail here.
    @Test public void testApplicationReport(){ long timestamp=System.currentTimeMillis(); ApplicationReport appReport1=createApplicationReport(1,1,timestamp); ApplicationReport appReport2=createApplicationReport(1,1,timestamp); ApplicationReport appReport3=createApplicationReport(1,1,timestamp); Assert.assertEquals(appReport1,appReport2); Assert.assertEquals(appReport2,appReport3); appReport1.setApplicationId(null); Assert.assertNull(appReport1.getApplicationId()); Assert.assertNotSame(appReport1,appReport2); appReport2.setCurrentApplicationAttemptId(null); Assert.assertNull(appReport2.getCurrentApplicationAttemptId()); Assert.assertNotSame(appReport2,appReport3); Assert.assertNull(appReport1.getAMRMToken()); }

    Class: org.apache.hadoop.yarn.api.TestContainerId

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** ContainerId: equals/compareTo/hashCode consistency and toString format. */
    @Test
    public void testContainerId() {
        ContainerId c1 = newContainerId(1, 1, 10l, 1);
        ContainerId c2 = newContainerId(1, 1, 10l, 2);
        ContainerId c3 = newContainerId(1, 1, 10l, 1);
        ContainerId c4 = newContainerId(1, 3, 10l, 1);
        ContainerId c5 = newContainerId(1, 3, 8l, 1);

        Assert.assertTrue(c1.equals(c3));
        Assert.assertFalse(c1.equals(c2));
        Assert.assertFalse(c1.equals(c4));
        Assert.assertFalse(c1.equals(c5));

        Assert.assertTrue(c1.compareTo(c3) == 0);
        Assert.assertTrue(c1.compareTo(c2) < 0);
        Assert.assertTrue(c1.compareTo(c4) < 0);
        Assert.assertTrue(c1.compareTo(c5) > 0);

        Assert.assertTrue(c1.hashCode() == c3.hashCode());
        Assert.assertFalse(c1.hashCode() == c2.hashCode());
        Assert.assertFalse(c1.hashCode() == c4.hashCode());
        Assert.assertFalse(c1.hashCode() == c5.hashCode());

        long ts = System.currentTimeMillis();
        ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
        Assert.assertEquals("container_10_0001_01_000001", c1.toString());
        Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",
            c6.toString());
    }

    Class: org.apache.hadoop.yarn.api.TestContainerResourceDecrease

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * ContainerResourceDecrease round-trips through its protobuf form.
     *
     * Fix: swapped expected/actual argument order in assertEquals corrected
     * (the known value goes first).
     */
    @Test
    public void testResourceDecreaseContext() {
        ContainerId containerId = ContainerId.newInstance(
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234, 3), 3), 7);
        Resource resource = Resource.newInstance(1023, 3);
        ContainerResourceDecrease ctx =
            ContainerResourceDecrease.newInstance(containerId, resource);

        // Round-trip through the protobuf representation.
        ContainerResourceDecreaseProto proto =
            ((ContainerResourceDecreasePBImpl) ctx).getProto();
        ctx = new ContainerResourceDecreasePBImpl(proto);
        Assert.assertEquals(resource, ctx.getCapability());
        Assert.assertEquals(containerId, ctx.getContainerId());
    }

    Class: org.apache.hadoop.yarn.api.TestContainerResourceIncrease

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * ContainerResourceIncrease round-trips through its protobuf form,
     * including the container token identifier bytes.
     *
     * Fix: swapped expected/actual argument order in assertEquals corrected
     * (the known value goes first).
     */
    @Test
    public void testResourceIncreaseContext() {
        byte[] identifier = new byte[]{1, 2, 3, 4};
        Token token = Token.newInstance(identifier, "", "".getBytes(), "");
        ContainerId containerId = ContainerId.newInstance(
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234, 3), 3), 7);
        Resource resource = Resource.newInstance(1023, 3);
        ContainerResourceIncrease ctx =
            ContainerResourceIncrease.newInstance(containerId, resource, token);

        // Round-trip through the protobuf representation.
        ContainerResourceIncreaseProto proto =
            ((ContainerResourceIncreasePBImpl) ctx).getProto();
        ctx = new ContainerResourceIncreasePBImpl(proto);
        Assert.assertEquals(resource, ctx.getCapability());
        Assert.assertEquals(containerId, ctx.getContainerId());
        Assert.assertTrue(Arrays.equals(identifier,
            ctx.getContainerToken().getIdentifier().array()));
    }

    Class: org.apache.hadoop.yarn.api.TestContainerResourceIncreaseRequest

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Round-trips a ContainerResourceIncreaseRequest through its protobuf form.
     * NOTE(review): method name breaks the test* naming convention but is kept
     * since it is the public identifier of this test.
     */
    @Test
    public void ContainerResourceIncreaseRequest() {
        ContainerId cid = ContainerId.newInstance(
            ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234, 3), 3), 7);
        Resource res = Resource.newInstance(1023, 3);
        ContainerResourceIncreaseRequest original =
            ContainerResourceIncreaseRequest.newInstance(cid, res);
        ContainerResourceIncreaseRequestProto proto =
            ((ContainerResourceIncreaseRequestPBImpl) original).getProto();
        ContainerResourceIncreaseRequest restored =
            new ContainerResourceIncreaseRequestPBImpl(proto);
        Assert.assertEquals(restored.getContainerId(), cid);
        Assert.assertEquals(restored.getCapability(), res);
    }

    Class: org.apache.hadoop.yarn.api.TestGetApplicationsRequest

    InternalCallVerifier EqualityVerifier 
    @Test public void testGetApplicationsRequest(){ GetApplicationsRequest request=GetApplicationsRequest.newInstance(); EnumSet appStates=EnumSet.of(YarnApplicationState.ACCEPTED); request.setApplicationStates(appStates); Set tags=new HashSet(); tags.add("tag1"); request.setApplicationTags(tags); Set types=new HashSet(); types.add("type1"); request.setApplicationTypes(types); long startBegin=System.currentTimeMillis(); long startEnd=System.currentTimeMillis() + 1; request.setStartRange(startBegin,startEnd); long finishBegin=System.currentTimeMillis() + 2; long finishEnd=System.currentTimeMillis() + 3; request.setFinishRange(finishBegin,finishEnd); long limit=100L; request.setLimit(limit); Set queues=new HashSet(); queues.add("queue1"); request.setQueues(queues); Set users=new HashSet(); users.add("user1"); request.setUsers(users); ApplicationsRequestScope scope=ApplicationsRequestScope.ALL; request.setScope(scope); GetApplicationsRequest requestFromProto=new GetApplicationsRequestPBImpl(((GetApplicationsRequestPBImpl)request).getProto()); Assert.assertEquals(requestFromProto,request); Assert.assertEquals("ApplicationStates from proto is not the same with original request",requestFromProto.getApplicationStates(),appStates); Assert.assertEquals("ApplicationTags from proto is not the same with original request",requestFromProto.getApplicationTags(),tags); Assert.assertEquals("ApplicationTypes from proto is not the same with original request",requestFromProto.getApplicationTypes(),types); Assert.assertEquals("StartRange from proto is not the same with original request",requestFromProto.getStartRange(),new LongRange(startBegin,startEnd)); Assert.assertEquals("FinishRange from proto is not the same with original request",requestFromProto.getFinishRange(),new LongRange(finishBegin,finishEnd)); Assert.assertEquals("Limit from proto is not the same with original request",requestFromProto.getLimit(),limit); Assert.assertEquals("Queues from proto is not the same with 
original request",requestFromProto.getQueues(),queues); Assert.assertEquals("Users from proto is not the same with original request",requestFromProto.getUsers(),users); }

    Class: org.apache.hadoop.yarn.api.TestNodeId

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** NodeId equality, ordering, hashing and "host:port" string rendering. */
    @Test
    public void testNodeId() {
        NodeId base = NodeId.newInstance("10.18.52.124", 8041);
        NodeId otherHost = NodeId.newInstance("10.18.52.125", 8038);
        NodeId sameAsBase = NodeId.newInstance("10.18.52.124", 8041);
        NodeId otherPort = NodeId.newInstance("10.18.52.124", 8039);
        Assert.assertTrue(base.equals(sameAsBase));
        Assert.assertFalse(base.equals(otherHost));
        Assert.assertFalse(sameAsBase.equals(otherPort));
        Assert.assertTrue(base.compareTo(sameAsBase) == 0);
        Assert.assertTrue(base.compareTo(otherHost) < 0);
        Assert.assertTrue(sameAsBase.compareTo(otherPort) > 0);
        Assert.assertTrue(base.hashCode() == sameAsBase.hashCode());
        Assert.assertFalse(base.hashCode() == otherHost.hashCode());
        Assert.assertFalse(sameAsBase.hashCode() == otherPort.hashCode());
        Assert.assertEquals("10.18.52.124:8041", base.toString());
    }

    Class: org.apache.hadoop.yarn.api.records.impl.pb.TestSerializedExceptionPBImpl

    NullVerifier EqualityVerifier HybridVerifier 
    /** Each getter on a fresh, never-initialized SerializedExceptionPBImpl yields proto defaults. */
    @Test
    public void testBeforeInit() throws Exception {
        SerializedExceptionProto emptyProto =
            SerializedExceptionProto.newBuilder().build();
        // a separate fresh instance per getter, so no call affects another
        SerializedExceptionPBImpl causeProbe = new SerializedExceptionPBImpl();
        Assert.assertNull(causeProbe.getCause());
        SerializedExceptionPBImpl protoProbe = new SerializedExceptionPBImpl();
        Assert.assertEquals(emptyProto, protoProbe.getProto());
        SerializedExceptionPBImpl traceProbe = new SerializedExceptionPBImpl();
        Assert.assertEquals(emptyProto.getTrace(), traceProbe.getRemoteTrace());
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * deSerialize must fail (YarnRuntimeException wrapping ClassNotFoundException)
     * before init, and round-trip the original exception after init.
     * FIX: corrected the "deSerialze" typo in the failure message.
     */
    @Test
    public void testDeserialize() throws Exception {
        Exception ex = new Exception("test exception");
        SerializedExceptionPBImpl pb = new SerializedExceptionPBImpl();
        try {
            pb.deSerialize();
            Assert.fail("deSerialize should throw YarnRuntimeException");
        } catch (YarnRuntimeException e) {
            Assert.assertEquals(ClassNotFoundException.class, e.getCause().getClass());
        }
        pb.init(ex);
        Assert.assertEquals(ex.toString(), pb.deSerialize().toString());
    }

    InternalCallVerifier EqualityVerifier 
    /** A SerializedExceptionPBImpl rebuilt from its proto equals the original. */
    @Test
    public void testSerializedException() throws Exception {
        SerializedExceptionPBImpl original = new SerializedExceptionPBImpl();
        original.init(new Exception("test exception"));
        SerializedExceptionProto proto = original.getProto();
        SerializedExceptionPBImpl restored = new SerializedExceptionPBImpl(proto);
        Assert.assertEquals(original, restored);
        Assert.assertEquals(original.getMessage(), restored.getMessage());
        Assert.assertEquals(original.getRemoteTrace(), restored.getRemoteTrace());
        Assert.assertEquals(original.getCause(), restored.getCause());
    }

    Class: org.apache.hadoop.yarn.api.records.timeline.TestTimelineRecords

    InternalCallVerifier EqualityVerifier 
    // Builds a TimelineEntities holding two TimelineEntity objects, each with two
    // events (two info entries apiece), two related entities, two primary filters
    // and two other-info entries; dumps the set to JSON for the log, then asserts
    // the collection size and the per-entity ids, types and sub-collection counts.
    @Test public void testEntities() throws Exception { TimelineEntities entities=new TimelineEntities(); for (int j=0; j < 2; ++j) { TimelineEntity entity=new TimelineEntity(); entity.setEntityId("entity id " + j); entity.setEntityType("entity type " + j); entity.setStartTime(System.currentTimeMillis()); for (int i=0; i < 2; ++i) { TimelineEvent event=new TimelineEvent(); event.setTimestamp(System.currentTimeMillis()); event.setEventType("event type " + i); event.addEventInfo("key1","val1"); event.addEventInfo("key2","val2"); entity.addEvent(event); } entity.addRelatedEntity("test ref type 1","test ref id 1"); entity.addRelatedEntity("test ref type 2","test ref id 2"); entity.addPrimaryFilter("pkey1","pval1"); entity.addPrimaryFilter("pkey2","pval2"); entity.addOtherInfo("okey1","oval1"); entity.addOtherInfo("okey2","oval2"); entities.addEntity(entity); } LOG.info("Entities in JSON:"); LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(entities,true)); Assert.assertEquals(2,entities.getEntities().size()); TimelineEntity entity1=entities.getEntities().get(0); Assert.assertEquals("entity id 0",entity1.getEntityId()); Assert.assertEquals("entity type 0",entity1.getEntityType()); Assert.assertEquals(2,entity1.getRelatedEntities().size()); Assert.assertEquals(2,entity1.getEvents().size()); Assert.assertEquals(2,entity1.getPrimaryFilters().size()); Assert.assertEquals(2,entity1.getOtherInfo().size()); TimelineEntity entity2=entities.getEntities().get(1); Assert.assertEquals("entity id 1",entity2.getEntityId()); Assert.assertEquals("entity type 1",entity2.getEntityType()); Assert.assertEquals(2,entity2.getRelatedEntities().size()); Assert.assertEquals(2,entity2.getEvents().size()); Assert.assertEquals(2,entity2.getPrimaryFilters().size()); Assert.assertEquals(2,entity2.getOtherInfo().size()); }

    InternalCallVerifier EqualityVerifier 
    // Builds a TimelineEvents container with two per-entity event groups, each
    // holding two events with two info entries apiece; dumps to JSON for the log,
    // then asserts the group count and, for every group, the entity id/type and
    // each event's type and info-map size.
    @Test public void testEvents() throws Exception { TimelineEvents events=new TimelineEvents(); for (int j=0; j < 2; ++j) { TimelineEvents.EventsOfOneEntity partEvents=new TimelineEvents.EventsOfOneEntity(); partEvents.setEntityId("entity id " + j); partEvents.setEntityType("entity type " + j); for (int i=0; i < 2; ++i) { TimelineEvent event=new TimelineEvent(); event.setTimestamp(System.currentTimeMillis()); event.setEventType("event type " + i); event.addEventInfo("key1","val1"); event.addEventInfo("key2","val2"); partEvents.addEvent(event); } events.addEvent(partEvents); } LOG.info("Events in JSON:"); LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(events,true)); Assert.assertEquals(2,events.getAllEvents().size()); TimelineEvents.EventsOfOneEntity partEvents1=events.getAllEvents().get(0); Assert.assertEquals("entity id 0",partEvents1.getEntityId()); Assert.assertEquals("entity type 0",partEvents1.getEntityType()); Assert.assertEquals(2,partEvents1.getEvents().size()); TimelineEvent event11=partEvents1.getEvents().get(0); Assert.assertEquals("event type 0",event11.getEventType()); Assert.assertEquals(2,event11.getEventInfo().size()); TimelineEvent event12=partEvents1.getEvents().get(1); Assert.assertEquals("event type 1",event12.getEventType()); Assert.assertEquals(2,event12.getEventInfo().size()); TimelineEvents.EventsOfOneEntity partEvents2=events.getAllEvents().get(1); Assert.assertEquals("entity id 1",partEvents2.getEntityId()); Assert.assertEquals("entity type 1",partEvents2.getEntityType()); Assert.assertEquals(2,partEvents2.getEvents().size()); TimelineEvent event21=partEvents2.getEvents().get(0); Assert.assertEquals("event type 0",event21.getEventType()); Assert.assertEquals(2,event21.getEventInfo().size()); TimelineEvent event22=partEvents2.getEvents().get(1); Assert.assertEquals("event type 1",event22.getEventType()); Assert.assertEquals(2,event22.getEventInfo().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Adds one error directly and two via addErrors (one duplicated), then
     * verifies each stored error keeps its entity id, entity type and code.
     * FIX: the original called setEntityId twice on each error; the second call
     * was clearly meant to be setEntityType ("entity type N" values), and the
     * duplicate call silently overwrote the id. Also parameterized the raw List.
     */
    @Test
    public void testTimelinePutErrors() throws Exception {
        TimelinePutResponse TimelinePutErrors = new TimelinePutResponse();
        TimelinePutError error1 = new TimelinePutError();
        error1.setEntityId("entity id 1");
        error1.setEntityType("entity type 1");
        error1.setErrorCode(TimelinePutError.NO_START_TIME);
        TimelinePutErrors.addError(error1);
        List<TimelinePutError> response = new ArrayList<TimelinePutError>();
        response.add(error1);
        TimelinePutError error2 = new TimelinePutError();
        error2.setEntityId("entity id 2");
        error2.setEntityType("entity type 2");
        error2.setErrorCode(TimelinePutError.IO_EXCEPTION);
        response.add(error2);
        TimelinePutErrors.addErrors(response);
        LOG.info("Errors in JSON:");
        LOG.info(TimelineUtils.dumpTimelineRecordtoJSON(TimelinePutErrors, true));
        // error1 appears twice: once from addError, once from addErrors
        Assert.assertEquals(3, TimelinePutErrors.getErrors().size());
        TimelinePutError e = TimelinePutErrors.getErrors().get(0);
        Assert.assertEquals(error1.getEntityId(), e.getEntityId());
        Assert.assertEquals(error1.getEntityType(), e.getEntityType());
        Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
        e = TimelinePutErrors.getErrors().get(1);
        Assert.assertEquals(error1.getEntityId(), e.getEntityId());
        Assert.assertEquals(error1.getEntityType(), e.getEntityType());
        Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
        e = TimelinePutErrors.getErrors().get(2);
        Assert.assertEquals(error2.getEntityId(), e.getEntityId());
        Assert.assertEquals(error2.getEntityType(), e.getEntityType());
        Assert.assertEquals(error2.getErrorCode(), e.getErrorCode());
    }

    Class: org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    // End-to-end distributed-shell run: starts the DS client on a background
    // thread, polls YarnClient until the app report carries the local hostname
    // and rpc port -1 (or the app finishes), then joins the client thread and
    // checks its result. Finally queries the timeline store for exactly one
    // DS_APP_ATTEMPT entity (with 2 events) and two DS_CONTAINER entities.
    // NOTE(review): the 10ms sleeps busy-poll with no explicit deadline beyond
    // the 90s test timeout.
    @Test(timeout=90000) public void testDSShell() throws Exception { String[] args={"--jar",APPMASTER_JAR,"--num_containers","2","--shell_command",Shell.WINDOWS ? "dir" : "ls","--master_memory","512","--master_vcores","2","--container_memory","128","--container_vcores","1"}; LOG.info("Initializing DS Client"); final Client client=new Client(new Configuration(yarnCluster.getConfig())); boolean initSuccess=client.init(args); Assert.assertTrue(initSuccess); LOG.info("Running DS Client"); final AtomicBoolean result=new AtomicBoolean(false); Thread t=new Thread(){ public void run(){ try { result.set(client.run()); } catch ( Exception e) { throw new RuntimeException(e); } } } ; t.start(); YarnClient yarnClient=YarnClient.createYarnClient(); yarnClient.init(new Configuration(yarnCluster.getConfig())); yarnClient.start(); String hostName=NetUtils.getHostname(); boolean verified=false; String errorMessage=""; while (!verified) { List apps=yarnClient.getApplications(); if (apps.size() == 0) { Thread.sleep(10); continue; } ApplicationReport appReport=apps.get(0); if (appReport.getHost().equals("N/A")) { Thread.sleep(10); continue; } errorMessage="Expected host name to start with '" + hostName + "', was '"+ appReport.getHost()+ "'. Expected rpc port to be '-1', was '"+ appReport.getRpcPort()+ "'."; if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) { verified=true; } if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) { break; } } Assert.assertTrue(errorMessage,verified); t.join(); LOG.info("Client run completed. 
    Result=" + result); Assert.assertTrue(result.get()); TimelineEntities entitiesAttempts=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),null,null,null,null,null,null,null,null); Assert.assertNotNull(entitiesAttempts); Assert.assertEquals(1,entitiesAttempts.getEntities().size()); Assert.assertEquals(2,entitiesAttempts.getEntities().get(0).getEvents().size()); Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString()); TimelineEntities entities=yarnCluster.getApplicationHistoryServer().getTimelineStore().getEntities(ApplicationMaster.DSEntity.DS_CONTAINER.toString(),null,null,null,null,null,null,null,null); Assert.assertNotNull(entities); Assert.assertEquals(2,entities.getEntities().size()); Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),ApplicationMaster.DSEntity.DS_CONTAINER.toString()); }

    Class: org.apache.hadoop.yarn.client.TestApplicationClientProtocolOnHA

    APIUtilityVerifier EqualityVerifier 
    /**
     * Renewing a delegation token through the HA client proxy must return the
     * cluster's fake next-expiration time.
     * FIX: JUnit assertEquals takes (expected, actual); the original passed the
     * actual value first, which would produce a misleading failure message.
     */
    @Test(timeout = 15000)
    public void testRenewDelegationTokenOnHA() throws Exception {
        RenewDelegationTokenRequest request =
            RenewDelegationTokenRequest.newInstance(cluster.createFakeToken());
        long newExpirationTime = ClientRMProxy
            .createRMProxy(this.conf, ApplicationClientProtocol.class)
            .renewDelegationToken(request).getNextExpirationTime();
        Assert.assertEquals(cluster.createNextExpirationTime(), newExpirationTime);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getNodeReports on the HA cluster returns the non-empty fake node reports. */
    @Test(timeout = 15000)
    public void testGetClusterNodesOnHA() throws Exception {
        List nodeReports = client.getNodeReports(NodeState.RUNNING);
        Assert.assertTrue(nodeReports != null && !nodeReports.isEmpty());
        Assert.assertEquals(cluster.createFakeNodeReports(), nodeReports);
    }

    InternalCallVerifier EqualityVerifier 
    /** getRMDelegationToken on the HA cluster returns the fake token. */
    @Test(timeout = 15000)
    public void testGetDelegationTokenOnHA() throws Exception {
        Token rmToken = client.getRMDelegationToken(new Text(" "));
        Assert.assertEquals(rmToken, cluster.createFakeToken());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getApplicationAttempts on the HA cluster returns the non-empty fake reports. */
    @Test(timeout = 15000)
    public void testGetApplicationAttemptsOnHA() throws Exception {
        List attemptReports = client.getApplicationAttempts(cluster.createFakeAppId());
        Assert.assertTrue(attemptReports != null && !attemptReports.isEmpty());
        Assert.assertEquals(cluster.createFakeApplicationAttemptReports(), attemptReports);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getContainers on the HA cluster returns the non-empty fake container reports. */
    @Test(timeout = 15000)
    public void testGetContainersOnHA() throws Exception {
        List containerReports =
            client.getContainers(cluster.createFakeApplicationAttemptId());
        Assert.assertTrue(containerReports != null && !containerReports.isEmpty());
        Assert.assertEquals(cluster.createFakeContainerReports(), containerReports);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** createApplication on the HA cluster yields the fake application id. */
    @Test(timeout = 15000)
    public void testGetNewApplicationOnHA() throws Exception {
        ApplicationId newAppId = client.createApplication()
            .getApplicationSubmissionContext().getApplicationId();
        Assert.assertTrue(newAppId != null);
        Assert.assertEquals(cluster.createFakeAppId(), newAppId);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getQueueInfo("root") on the HA cluster returns the fake queue info. */
    @Test(timeout = 15000)
    public void testGetQueueInfoOnHA() throws Exception {
        QueueInfo rootQueueInfo = client.getQueueInfo("root");
        Assert.assertTrue(rootQueueInfo != null);
        Assert.assertEquals(cluster.createFakeQueueInfo(), rootQueueInfo);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getApplicationAttemptReport on the HA cluster returns the fake attempt report. */
    @Test(timeout = 15000)
    public void testGetApplicationAttemptReportOnHA() throws Exception {
        ApplicationAttemptReport attemptReport =
            client.getApplicationAttemptReport(cluster.createFakeApplicationAttemptId());
        Assert.assertTrue(attemptReport != null);
        Assert.assertEquals(cluster.createFakeApplicationAttemptReport(), attemptReport);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getApplicationReport on the HA cluster returns the fake app report. */
    @Test(timeout = 15000)
    public void testGetApplicationReportOnHA() throws Exception {
        ApplicationReport appReport = client.getApplicationReport(cluster.createFakeAppId());
        Assert.assertTrue(appReport != null);
        Assert.assertEquals(cluster.createFakeAppReport(), appReport);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getContainerReport on the HA cluster returns the fake container report. */
    @Test(timeout = 15000)
    public void testGetContainerReportOnHA() throws Exception {
        ContainerReport containerReport =
            client.getContainerReport(cluster.createFakeContainerId());
        Assert.assertTrue(containerReport != null);
        Assert.assertEquals(cluster.createFakeContainerReport(), containerReport);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getQueueAclsInfo on the HA cluster returns the non-empty fake ACL list. */
    @Test(timeout = 15000)
    public void testGetQueueUserAclsOnHA() throws Exception {
        List queueAcls = client.getQueueAclsInfo();
        Assert.assertTrue(queueAcls != null && !queueAcls.isEmpty());
        Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(), queueAcls);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getApplications on the HA cluster returns the non-empty fake app reports. */
    @Test(timeout = 15000)
    public void testGetApplicationsOnHA() throws Exception {
        List appReports = client.getApplications();
        Assert.assertTrue(appReports != null && !appReports.isEmpty());
        Assert.assertEquals(cluster.createFakeAppReports(), appReports);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** getYarnClusterMetrics on the HA cluster returns the fake metrics. */
    @Test(timeout = 15000)
    public void testGetClusterMetricsOnHA() throws Exception {
        YarnClusterMetrics metrics = client.getYarnClusterMetrics();
        Assert.assertTrue(metrics != null);
        Assert.assertEquals(cluster.createFakeYarnClusterMetrics(), metrics);
    }

    Class: org.apache.hadoop.yarn.client.TestApplicationMasterServiceOnHA

    InternalCallVerifier EqualityVerifier 
    /** An allocate call through the HA AM proxy yields the cluster's fake response. */
    @Test(timeout = 15000)
    public void testAllocateOnHA() throws YarnException, IOException {
        AllocateRequest allocateRequest = AllocateRequest.newInstance(
            0, 50f, new ArrayList(), new ArrayList(),
            ResourceBlacklistRequest.newInstance(new ArrayList(), new ArrayList()));
        AllocateResponse allocateResponse = amClient.allocate(allocateRequest);
        Assert.assertEquals(allocateResponse, this.cluster.createFakeAllocateResponse());
    }

    Class: org.apache.hadoop.yarn.client.TestClientRMProxy

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** Token service names: one entry without HA, one per RM id with HA enabled. */
    @Test
    public void testGetRMDelegationTokenService() {
        String defaultRMAddress = YarnConfiguration.DEFAULT_RM_ADDRESS;
        YarnConfiguration conf = new YarnConfiguration();
        // non-HA: a single comma-separated entry derived from the default address
        Text tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
        String[] services = tokenService.toString().split(",");
        assertEquals(1, services.length);
        for (String service : services) {
            assertTrue("Incorrect token service name",
                service.contains(defaultRMAddress));
        }
        // HA with two RM ids: two entries expected
        conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
        conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
        conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm1"), "0.0.0.0");
        conf.set(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME, "rm2"), "0.0.0.0");
        tokenService = ClientRMProxy.getRMDelegationTokenService(conf);
        services = tokenService.toString().split(",");
        assertEquals(2, services.length);
        for (String service : services) {
            assertTrue("Incorrect token service name",
                service.contains(defaultRMAddress));
        }
    }

    Class: org.apache.hadoop.yarn.client.TestRMAdminCLI

    BooleanVerifier EqualityVerifier HybridVerifier 
    /** A refreshQueues failure yields exit code -1 and is reported on stderr. */
    @Test(timeout = 500)
    public void testException() throws Exception {
        PrintStream savedErr = System.err;
        ByteArrayOutputStream errBytes = new ByteArrayOutputStream();
        System.setErr(new PrintStream(errBytes));
        try {
            when(admin.refreshQueues(any(RefreshQueuesRequest.class)))
                .thenThrow(new IOException("test exception"));
            String[] cliArgs = {"-refreshQueues"};
            assertEquals(-1, rmAdminCLI.run(cliArgs));
            verify(admin).refreshQueues(any(RefreshQueuesRequest.class));
            assertTrue(errBytes.toString().contains("refreshQueues: test exception"));
        } finally {
            // always restore the real stderr stream
            System.setErr(savedErr);
        }
    }

    EqualityVerifier 
    /** -refreshNodes succeeds and delegates to the admin protocol. */
    @Test(timeout = 500)
    public void testRefreshNodes() throws Exception {
        String[] cliArgs = {"-refreshNodes"};
        assertEquals(0, rmAdminCLI.run(cliArgs));
        verify(admin).refreshNodes(any(RefreshNodesRequest.class));
    }

    EqualityVerifier 
    /** -getServiceState is rejected without HA and forwarded with HA enabled. */
    @Test(timeout = 500)
    public void testGetServiceState() throws Exception {
        String[] cliArgs = {"-getServiceState", "rm1"};
        assertEquals(-1, rmAdminCLI.run(cliArgs));
        verify(haadmin, never()).getServiceStatus();
        assertEquals(0, rmAdminCLIWithHAEnabled.run(cliArgs));
        verify(haadmin).getServiceStatus();
    }

    EqualityVerifier 
    /** -transitionToStandby is rejected without HA and forwarded with HA enabled. */
    @Test(timeout = 500)
    public void testTransitionToStandby() throws Exception {
        String[] cliArgs = {"-transitionToStandby", "rm1"};
        assertEquals(-1, rmAdminCLI.run(cliArgs));
        verify(haadmin, never())
            .transitionToStandby(any(HAServiceProtocol.StateChangeRequestInfo.class));
        assertEquals(0, rmAdminCLIWithHAEnabled.run(cliArgs));
        verify(haadmin)
            .transitionToStandby(any(HAServiceProtocol.StateChangeRequestInfo.class));
    }

    EqualityVerifier 
    /** -refreshUserToGroupsMappings succeeds and delegates to the admin protocol. */
    @Test(timeout = 500)
    public void testRefreshUserToGroupsMappings() throws Exception {
        String[] cliArgs = {"-refreshUserToGroupsMappings"};
        assertEquals(0, rmAdminCLI.run(cliArgs));
        verify(admin).refreshUserToGroupsMappings(
            any(RefreshUserToGroupsMappingsRequest.class));
    }

    EqualityVerifier 
    /** -transitionToActive is rejected without HA and forwarded with HA enabled. */
    @Test(timeout = 500)
    public void testTransitionToActive() throws Exception {
        String[] cliArgs = {"-transitionToActive", "rm1"};
        assertEquals(-1, rmAdminCLI.run(cliArgs));
        verify(haadmin, never())
            .transitionToActive(any(HAServiceProtocol.StateChangeRequestInfo.class));
        assertEquals(0, rmAdminCLIWithHAEnabled.run(cliArgs));
        verify(haadmin)
            .transitionToActive(any(HAServiceProtocol.StateChangeRequestInfo.class));
    }

    EqualityVerifier 
    /** -checkHealth is rejected without HA and forwarded with HA enabled. */
    @Test(timeout = 500)
    public void testCheckHealth() throws Exception {
        String[] cliArgs = {"-checkHealth", "rm1"};
        assertEquals(-1, rmAdminCLI.run(cliArgs));
        verify(haadmin, never()).monitorHealth();
        assertEquals(0, rmAdminCLIWithHAEnabled.run(cliArgs));
        verify(haadmin).monitorHealth();
    }

    EqualityVerifier 
    /** -getGroups prints "admin : group1 group2" as obtained from the admin protocol. */
    @Test(timeout = 500)
    public void testGetGroups() throws Exception {
        when(admin.getGroupsForUser(eq("admin")))
            .thenReturn(new String[] {"group1", "group2"});
        PrintStream savedOut = System.out;
        PrintStream mockedOut = mock(PrintStream.class);
        System.setOut(mockedOut);
        try {
            String[] cliArgs = {"-getGroups", "admin"};
            assertEquals(0, rmAdminCLI.run(cliArgs));
            verify(admin).getGroupsForUser(eq("admin"));
            verify(mockedOut).println(argThat(new ArgumentMatcher() {
                @Override
                public boolean matches(Object argument) {
                    // exact single-line rendering of the user's groups
                    return ("" + argument).equals("admin : group1 group2");
                }
            }));
        } finally {
            System.setOut(savedOut);
        }
    }

    BooleanVerifier EqualityVerifier HybridVerifier 
    // Captures stdout/stderr, runs "-help" on both the plain and HA-enabled CLI,
    // and asserts the usage banner and each per-command description appear.
    // Per-command help and error cases are checked via testError, including the
    // unknown-parameter path (exit code -1). Streams are restored in finally.
    /** * Test printing of help messages */ @Test(timeout=500) public void testHelp() throws Exception { PrintStream oldOutPrintStream=System.out; PrintStream oldErrPrintStream=System.err; ByteArrayOutputStream dataOut=new ByteArrayOutputStream(); ByteArrayOutputStream dataErr=new ByteArrayOutputStream(); System.setOut(new PrintStream(dataOut)); System.setErr(new PrintStream(dataErr)); try { String[] args={"-help"}; assertEquals(0,rmAdminCLI.run(args)); oldOutPrintStream.println(dataOut); assertTrue(dataOut.toString().contains("rmadmin is the command to execute YARN administrative commands.")); assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"+ " [username]] [-help [cmd]]")); assertTrue(dataOut.toString().contains("-refreshQueues: Reload the queues' acls, states and scheduler " + "specific properties.")); assertTrue(dataOut.toString().contains("-refreshNodes: Refresh the hosts information at the " + "ResourceManager.")); assertTrue(dataOut.toString().contains("-refreshUserToGroupsMappings: Refresh user-to-groups mappings")); assertTrue(dataOut.toString().contains("-refreshSuperUserGroupsConfiguration: Refresh superuser proxy" + " groups mappings")); assertTrue(dataOut.toString().contains("-refreshAdminAcls: Refresh acls for administration of " + "ResourceManager")); assertTrue(dataOut.toString().contains("-refreshServiceAcl: Reload the service-level authorization" + " policy file")); assertTrue(dataOut.toString().contains("-help [cmd]: Displays help for the given command or all " + "commands if none")); testError(new String[]{"-help","-refreshQueues"},"Usage: yarn rmadmin [-refreshQueues]",dataErr,0); testError(new String[]{"-help","-refreshNodes"},"Usage: yarn rmadmin [-refreshNodes]",dataErr,0); testError(new String[]{"-help","-refreshUserToGroupsMappings"},"Usage: yarn rmadmin 
    [-refreshUserToGroupsMappings]",dataErr,0); testError(new String[]{"-help","-refreshSuperUserGroupsConfiguration"},"Usage: yarn rmadmin [-refreshSuperUserGroupsConfiguration]",dataErr,0); testError(new String[]{"-help","-refreshAdminAcls"},"Usage: yarn rmadmin [-refreshAdminAcls]",dataErr,0); testError(new String[]{"-help","-refreshServiceAcl"},"Usage: yarn rmadmin [-refreshServiceAcl]",dataErr,0); testError(new String[]{"-help","-getGroups"},"Usage: yarn rmadmin [-getGroups [username]]",dataErr,0); testError(new String[]{"-help","-transitionToActive"},"Usage: yarn rmadmin [-transitionToActive " + " [--forceactive]]",dataErr,0); testError(new String[]{"-help","-transitionToStandby"},"Usage: yarn rmadmin [-transitionToStandby ]",dataErr,0); testError(new String[]{"-help","-getServiceState"},"Usage: yarn rmadmin [-getServiceState ]",dataErr,0); testError(new String[]{"-help","-checkHealth"},"Usage: yarn rmadmin [-checkHealth ]",dataErr,0); testError(new String[]{"-help","-failover"},"Usage: yarn rmadmin " + "[-failover [--forcefence] [--forceactive] " + " ]",dataErr,0); testError(new String[]{"-help","-badParameter"},"Usage: yarn rmadmin",dataErr,0); testError(new String[]{"-badParameter"},"badParameter: Unknown command",dataErr,-1); assertEquals(0,rmAdminCLIWithHAEnabled.run(args)); oldOutPrintStream.println(dataOut); assertTrue(dataOut.toString().contains("yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"+ " [username]] [-help [cmd]] [-transitionToActive "+ " [--forceactive]] [-transitionToStandby ] [-failover"+ " [--forcefence] [--forceactive] ] "+ "[-getServiceState ] [-checkHealth ]")); } finally { System.setOut(oldOutPrintStream); System.setErr(oldErrPrintStream); } }

    EqualityVerifier 
    /** -refreshAdminAcls succeeds and delegates to the admin protocol. */
    @Test(timeout = 500)
    public void testRefreshAdminAcls() throws Exception {
        String[] cliArgs = {"-refreshAdminAcls"};
        assertEquals(0, rmAdminCLI.run(cliArgs));
        verify(admin).refreshAdminAcls(any(RefreshAdminAclsRequest.class));
    }

    EqualityVerifier 
    /** -refreshServiceAcl succeeds and delegates to the admin protocol. */
    @Test(timeout = 500)
    public void testRefreshServiceAcl() throws Exception {
        String[] cliArgs = {"-refreshServiceAcl"};
        assertEquals(0, rmAdminCLI.run(cliArgs));
        verify(admin).refreshServiceAcls(any(RefreshServiceAclsRequest.class));
    }

    EqualityVerifier 
    /** -refreshQueues succeeds and delegates to the admin protocol. */
    @Test(timeout = 500)
    public void testRefreshQueues() throws Exception {
        String[] cliArgs = {"-refreshQueues"};
        assertEquals(0, rmAdminCLI.run(cliArgs));
        verify(admin).refreshQueues(any(RefreshQueuesRequest.class));
    }

    EqualityVerifier 
    /** -refreshSuperUserGroupsConfiguration succeeds and delegates to the admin protocol. */
    @Test(timeout = 500)
    public void testRefreshSuperUserGroupsConfiguration() throws Exception {
        String[] cliArgs = {"-refreshSuperUserGroupsConfiguration"};
        assertEquals(0, rmAdminCLI.run(cliArgs));
        verify(admin).refreshSuperUserGroupsConfiguration(
            any(RefreshSuperUserGroupsConfigurationRequest.class));
    }

    Class: org.apache.hadoop.yarn.client.TestRMFailover

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // Runs a standalone WebAppProxyServer against the HA mini cluster: transitions
    // RM 0 to active, verifies connections, starts the proxy on 0.0.0.0:9099,
    // checks INITED/STARTED service-state transitions, hits the proxy URL for a
    // fake app, triggers an explicit failover, and re-verifies the proxy response.
    // The proxy server is always stopped in the finally block.
    @Test public void testWebAppProxyInStandAloneMode() throws YarnException, InterruptedException, IOException { conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); WebAppProxyServer webAppProxyServer=new WebAppProxyServer(); try { conf.set(YarnConfiguration.PROXY_ADDRESS,"0.0.0.0:9099"); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); assertFalse("RM never turned active",-1 == cluster.getActiveRMIndex()); verifyConnections(); webAppProxyServer.init(conf); Assert.assertEquals(STATE.INITED,webAppProxyServer.getServiceState()); webAppProxyServer.start(); Assert.assertEquals(STATE.STARTED,webAppProxyServer.getServiceState()); URL wrongUrl=new URL("http://0.0.0.0:9099/proxy/" + fakeAppId); HttpURLConnection proxyConn=(HttpURLConnection)wrongUrl.openConnection(); proxyConn.connect(); verifyResponse(proxyConn); explicitFailover(); verifyConnections(); proxyConn.connect(); verifyResponse(proxyConn); } finally { webAppProxyServer.stop(); } }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Standby RM web app: proxied pages carry a Refresh header redirecting to
     * the active RM, while locally-served endpoints return no Refresh header.
     */
    @Test
    public void testRMWebAppRedirect()
        throws YarnException, InterruptedException, IOException {
        cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1);
        conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
        cluster.init(conf);
        cluster.start();
        getAdminService(0).transitionToActive(req);
        String rm1Url = "http://0.0.0.0:18088";
        String rm2Url = "http://0.0.0.0:28088";
        // pages that must redirect from the standby to the active RM
        String header = getHeader("Refresh", rm2Url);
        assertTrue(header.contains("; url=" + rm1Url));
        header = getHeader("Refresh", rm2Url + "/metrics");
        assertTrue(header.contains("; url=" + rm1Url));
        header = getHeader("Refresh", rm2Url + "/jmx");
        assertTrue(header.contains("; url=" + rm1Url));
        // endpoints the standby serves itself: no Refresh header expected
        for (String path : new String[] {"/cluster/cluster", "/conf", "/stacks",
            "/logLevel", "/static", "/logs", "/ws/v1/cluster/info"}) {
            assertEquals(null, getHeader("Refresh", rm2Url + path));
        }
        header = getHeader("Refresh", rm2Url + "/ws/v1/cluster/apps");
        assertTrue(header.contains("; url=" + rm1Url));
    }

    Class: org.apache.hadoop.yarn.client.TestYarnApiClasses

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test CancelDelegationTokenRequestPBImpl: the delegation token must
     * survive a transformation to its prototype and back.
     */
    @Test
    public void testCancelDelegationTokenRequestPBImpl() {
        Token token = getDelegationToken();
        CancelDelegationTokenRequestPBImpl original =
            new CancelDelegationTokenRequestPBImpl();
        original.setDelegationToken(token);
        CancelDelegationTokenRequestProto proto = original.getProto();
        CancelDelegationTokenRequestPBImpl restored =
            new CancelDelegationTokenRequestPBImpl(proto);
        assertNotNull(restored.getDelegationToken());
        assertEquals(token, restored.getDelegationToken());
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test RenewDelegationTokenRequestPBImpl: the delegation token must
     * survive a transformation to its prototype and back.
     */
    @Test
    public void testRenewDelegationTokenRequestPBImpl() {
        Token token = getDelegationToken();
        RenewDelegationTokenRequestPBImpl original =
            new RenewDelegationTokenRequestPBImpl();
        original.setDelegationToken(token);
        RenewDelegationTokenRequestProto proto = original.getProto();
        RenewDelegationTokenRequestPBImpl restored =
            new RenewDelegationTokenRequestPBImpl(proto);
        assertNotNull(restored.getDelegationToken());
        assertEquals(token, restored.getDelegationToken());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks ResourceRequest value semantics: two instances built from the
     * same fields must be equal, compare as 0 and share a hash code; mutating
     * the container count must break all three relations.
     */
    @Test
    public void testResourceRequest() {
      Resource resource = recordFactory.newRecordInstance(Resource.class);
      Priority priority = recordFactory.newRecordInstance(Priority.class);
      ResourceRequest original = ResourceRequest.newInstance(priority, "localhost", resource, 2);
      ResourceRequest copy = ResourceRequest.newInstance(priority, "localhost", resource, 2);
      assertTrue(original.equals(copy));
      assertEquals(0, original.compareTo(copy));
      assertTrue(original.hashCode() == copy.hashCode());
      copy.setNumContainers(1);
      assertFalse(original.equals(copy));
      // BUG FIX: the original used assertNotSame(0, compareTo(...)), which
      // compares boxed Integer *identity* and only passed by accident of the
      // Integer autobox cache; assert on the value instead.
      assertTrue(original.compareTo(copy) != 0);
      assertFalse(original.hashCode() == copy.hashCode());
    }

    Class: org.apache.hadoop.yarn.client.api.async.impl.TestAMRMClientAsync

    APIUtilityVerifier IterativeVerifier EqualityVerifier 
    /**
     * Drives AMRMClientAsync against a mocked AMRMClient: the first allocate
     * returns allocated containers (response1), the second answer blocks on
     * {@code heartbeatBlock} before returning completed containers
     * (response2), and all later heartbeats return an empty response.
     * Verifies that (a) the heartbeat thread really parks inside the blocked
     * allocate (secondHeartbeatSync stays below 2), (b) getAvailableResources()
     * can run concurrently without unblocking it, and (c) after release the
     * allocated and completed containers each reach the callback handler
     * exactly once, with nothing left after stop().
     * NOTE(review): progress is polled with Thread.sleep(10) under a 10s
     * timeout — timing-sensitive by design.
     */
    @SuppressWarnings("unchecked") @Test(timeout=10000) public void testAMRMClientAsync() throws Exception { Configuration conf=new Configuration(); final AtomicBoolean heartbeatBlock=new AtomicBoolean(true); List completed1=Arrays.asList(ContainerStatus.newInstance(newContainerId(0,0,0,0),ContainerState.COMPLETE,"",0)); List allocated1=Arrays.asList(Container.newInstance(null,null,null,null,null,null)); final AllocateResponse response1=createAllocateResponse(new ArrayList(),allocated1,null); final AllocateResponse response2=createAllocateResponse(completed1,new ArrayList(),null); final AllocateResponse emptyResponse=createAllocateResponse(new ArrayList(),new ArrayList(),null); TestCallbackHandler callbackHandler=new TestCallbackHandler(); final AMRMClient client=mock(AMRMClientImpl.class); final AtomicInteger secondHeartbeatSync=new AtomicInteger(0); when(client.allocate(anyFloat())).thenReturn(response1).thenAnswer(new Answer(){ @Override public AllocateResponse answer( InvocationOnMock invocation) throws Throwable { secondHeartbeatSync.incrementAndGet(); while (heartbeatBlock.get()) { synchronized (heartbeatBlock) { heartbeatBlock.wait(); } } secondHeartbeatSync.incrementAndGet(); return response2; } } ).thenReturn(emptyResponse); when(client.registerApplicationMaster(anyString(),anyInt(),anyString())).thenReturn(null); when(client.getAvailableResources()).thenAnswer(new Answer(){ @Override public Resource answer( InvocationOnMock invocation) throws Throwable { synchronized (client) { Thread.sleep(10); } return null; } } ); AMRMClientAsync asyncClient=AMRMClientAsync.createAMRMClientAsync(client,20,callbackHandler); asyncClient.init(conf); asyncClient.start(); asyncClient.registerApplicationMaster("localhost",1234,null); while (secondHeartbeatSync.get() < 1) { Thread.sleep(10); } assert (secondHeartbeatSync.get() < 2); asyncClient.getAvailableResources(); assert (secondHeartbeatSync.get() < 2); synchronized (heartbeatBlock) { heartbeatBlock.set(false); 
    // Release the blocked heartbeat, then drain allocated followed by
    // completed containers; after stop() both queues must be empty.
heartbeatBlock.notifyAll(); } Assert.assertEquals(null,callbackHandler.takeCompletedContainers()); while (callbackHandler.takeAllocatedContainers() == null) { Assert.assertEquals(null,callbackHandler.takeCompletedContainers()); Thread.sleep(10); } while (callbackHandler.takeCompletedContainers() == null) { Thread.sleep(10); } asyncClient.stop(); Assert.assertEquals(null,callbackHandler.takeAllocatedContainers()); Assert.assertEquals(null,callbackHandler.takeCompletedContainers()); }

    Class: org.apache.hadoop.yarn.client.api.async.impl.TestNMClientAsync

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises NMClientAsync end to end with a bounded thread pool (max 10):
     * fires expectedSuccess + expectedFailure startContainerAsync calls,
     * swapping in a new mock NMClient once all success callbacks have run,
     * then a second failure phase with the callback handler's path flag
     * flipped. Afterwards asserts that neither the callback handler nor the
     * container event processor recorded any error messages, waits for all
     * containers to drain, and checks that stop() kills the event dispatcher
     * thread and shuts the thread pool down.
     * NOTE(review): progress is polled with Thread.sleep(10) under a 10s
     * timeout — timing-sensitive by design.
     */
    @Test(timeout=10000) public void testNMClientAsync() throws Exception { Configuration conf=new Configuration(); conf.setInt(YarnConfiguration.NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE,10); int expectedSuccess=40; int expectedFailure=40; asyncClient=new MockNMClientAsync1(expectedSuccess,expectedFailure); asyncClient.init(conf); Assert.assertEquals("The max thread pool size is not correctly set",10,asyncClient.maxThreadPoolSize); asyncClient.start(); for (int i=0; i < expectedSuccess + expectedFailure; ++i) { if (i == expectedSuccess) { while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isAllSuccessCallsExecuted()) { Thread.sleep(10); } asyncClient.setClient(mockNMClient(1)); } Container container=mockContainer(i); ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class); asyncClient.startContainerAsync(container,clc); } while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStartAndQueryFailureCallsExecuted()) { Thread.sleep(10); } asyncClient.setClient(mockNMClient(2)); ((TestCallbackHandler1)asyncClient.getCallbackHandler()).path=false; for (int i=0; i < expectedFailure; ++i) { Container container=mockContainer(expectedSuccess + expectedFailure + i); ContainerLaunchContext clc=recordFactory.newRecordInstance(ContainerLaunchContext.class); asyncClient.startContainerAsync(container,clc); } while (!((TestCallbackHandler1)asyncClient.getCallbackHandler()).isStopFailureCallsExecuted()) { Thread.sleep(10); } for ( String errorMsg : ((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs) { System.out.println(errorMsg); } Assert.assertEquals("Error occurs in CallbackHandler",0,((TestCallbackHandler1)asyncClient.getCallbackHandler()).errorMsgs.size()); for ( String errorMsg : ((MockNMClientAsync1)asyncClient).errorMsgs) { System.out.println(errorMsg); } Assert.assertEquals("Error occurs in ContainerEventProcessor",0,((MockNMClientAsync1)asyncClient).errorMsgs.size()); while (asyncClient.containers.size() 
> 0) { Thread.sleep(10); } asyncClient.stop(); Assert.assertFalse("The thread of Container Management Event Dispatcher is still alive",asyncClient.eventDispatcherThread.isAlive()); Assert.assertTrue("The thread pool is not shut down",asyncClient.threadPool.isShutdown()); }

    Class: org.apache.hadoop.yarn.client.api.impl.TestAHSClient

    InternalCallVerifier EqualityVerifier 
    /**
     * Fetches a single application report from the mock AHS client and checks
     * it matches the first canned report.
     */
    @Test(timeout = 10000)
    public void testGetApplicationReport() throws YarnException, IOException {
      Configuration conf = new Configuration();
      final AHSClient client = new MockAHSClient();
      client.init(conf);
      client.start();
      List expectedReports = ((MockAHSClient) client).getReports();
      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
      ApplicationReport report = client.getApplicationReport(applicationId);
      // BUG FIX: JUnit's assertEquals takes (expected, actual); the original
      // reversed them, producing misleading failure messages.
      Assert.assertEquals(expectedReports.get(0), report);
      Assert.assertEquals(expectedReports.get(0).getApplicationId().toString(),
          report.getApplicationId().toString());
      client.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Lists all applications through the mock AHS client and checks the
     * result equals the canned report list (size 4) on repeated calls.
     */
    @Test(timeout = 10000)
    public void testGetApplications() throws YarnException, IOException {
      Configuration conf = new Configuration();
      final AHSClient client = new MockAHSClient();
      client.init(conf);
      client.start();
      List expectedReports = ((MockAHSClient) client).getReports();
      List reports = client.getApplications();
      // BUG FIX: JUnit's assertEquals takes (expected, actual); the original
      // reversed them, producing misleading failure messages.
      Assert.assertEquals(expectedReports, reports);
      reports = client.getApplications();
      Assert.assertEquals(4, reports.size());
      client.stop();
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Lists the attempts of one application through the mock AHS client and
     * checks the first two attempt IDs.
     */
    @Test(timeout = 10000)
    public void testGetApplicationAttempts() throws YarnException, IOException {
      Configuration conf = new Configuration();
      final AHSClient client = new MockAHSClient();
      client.init(conf);
      client.start();
      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
      List reports = client.getApplicationAttempts(applicationId);
      Assert.assertNotNull(reports);
      // BUG FIX: JUnit's assertEquals takes (expected, actual); the original
      // reversed them, producing misleading failure messages.
      Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 1),
          reports.get(0).getApplicationAttemptId());
      Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId, 2),
          reports.get(1).getApplicationAttemptId());
      client.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Fetches a single attempt report through the mock AHS client and checks
     * its attempt ID against the first canned report's current attempt.
     */
    @Test(timeout = 10000)
    public void testGetApplicationAttempt() throws YarnException, IOException {
      Configuration conf = new Configuration();
      final AHSClient client = new MockAHSClient();
      client.init(conf);
      client.start();
      List expectedReports = ((MockAHSClient) client).getReports();
      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
      ApplicationAttemptReport report = client.getApplicationAttemptReport(appAttemptId);
      Assert.assertNotNull(report);
      // BUG FIX: JUnit's assertEquals takes (expected, actual); the original
      // reversed them, producing misleading failure messages.
      Assert.assertEquals(
          expectedReports.get(0).getCurrentApplicationAttemptId().toString(),
          report.getApplicationAttemptId().toString());
      client.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Fetches a single container report through the mock AHS client and
     * checks its container ID against the first canned report's attempt.
     */
    @Test(timeout = 10000)
    public void testGetContainerReport() throws YarnException, IOException {
      Configuration conf = new Configuration();
      final AHSClient client = new MockAHSClient();
      client.init(conf);
      client.start();
      List expectedReports = ((MockAHSClient) client).getReports();
      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
      ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
      ContainerReport report = client.getContainerReport(containerId);
      Assert.assertNotNull(report);
      // BUG FIX: JUnit's assertEquals takes (expected, actual); the original
      // reversed them, producing misleading failure messages.
      Assert.assertEquals(
          (ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(), 1))
              .toString(),
          report.getContainerId().toString());
      client.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Lists the containers of one attempt through the mock AHS client and
     * checks the first two container IDs.
     */
    @Test(timeout = 10000)
    public void testGetContainers() throws YarnException, IOException {
      Configuration conf = new Configuration();
      final AHSClient client = new MockAHSClient();
      client.init(conf);
      client.start();
      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1);
      List reports = client.getContainers(appAttemptId);
      Assert.assertNotNull(reports);
      // BUG FIX: JUnit's assertEquals takes (expected, actual); the original
      // reversed them, producing misleading failure messages.
      Assert.assertEquals((ContainerId.newInstance(appAttemptId, 1)),
          reports.get(0).getContainerId());
      Assert.assertEquals((ContainerId.newInstance(appAttemptId, 2)),
          reports.get(1).getContainerId());
      client.stop();
    }

    Class: org.apache.hadoop.yarn.client.api.impl.TestAMRMClient

    APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=60000) public void testAMRMClientMatchingFit() throws YarnException, IOException { AMRMClient amClient=null; try { amClient=AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); Resource capability1=Resource.newInstance(1024,2); Resource capability2=Resource.newInstance(1024,1); Resource capability3=Resource.newInstance(1000,2); Resource capability4=Resource.newInstance(2000,1); Resource capability5=Resource.newInstance(1000,3); Resource capability6=Resource.newInstance(2000,1); Resource capability7=Resource.newInstance(2000,1); ContainerRequest storedContainer1=new ContainerRequest(capability1,nodes,racks,priority); ContainerRequest storedContainer2=new ContainerRequest(capability2,nodes,racks,priority); ContainerRequest storedContainer3=new ContainerRequest(capability3,nodes,racks,priority); ContainerRequest storedContainer4=new ContainerRequest(capability4,nodes,racks,priority); ContainerRequest storedContainer5=new ContainerRequest(capability5,nodes,racks,priority); ContainerRequest storedContainer6=new ContainerRequest(capability6,nodes,racks,priority); ContainerRequest storedContainer7=new ContainerRequest(capability7,nodes,racks,priority2,false); amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer2); amClient.addContainerRequest(storedContainer3); amClient.addContainerRequest(storedContainer4); amClient.addContainerRequest(storedContainer5); amClient.addContainerRequest(storedContainer6); amClient.addContainerRequest(storedContainer7); List> matches; ContainerRequest storedRequest; Resource testCapability1=Resource.newInstance(1024,2); matches=amClient.getMatchingRequests(priority,node,testCapability1); verifyMatches(matches,1); storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); amClient.removeContainerRequest(storedContainer1); Resource testCapability2=Resource.newInstance(2000,1); 
matches=amClient.getMatchingRequests(priority,node,testCapability2); verifyMatches(matches,2); int i=0; for ( ContainerRequest storedRequest1 : matches.get(0)) { if (i++ == 0) { assertEquals(storedContainer4,storedRequest1); } else { assertEquals(storedContainer6,storedRequest1); } } amClient.removeContainerRequest(storedContainer6); Resource testCapability3=Resource.newInstance(4000,4); matches=amClient.getMatchingRequests(priority,node,testCapability3); assert (matches.size() == 4); Resource testCapability4=Resource.newInstance(1024,2); matches=amClient.getMatchingRequests(priority,node,testCapability4); assert (matches.size() == 2); for ( Collection testSet : matches) { assertEquals(1,testSet.size()); ContainerRequest testRequest=testSet.iterator().next(); assertTrue(testRequest != storedContainer4); assertTrue(testRequest != storedContainer5); assert (testRequest == storedContainer2 || testRequest == storedContainer3); } Resource testCapability5=Resource.newInstance(512,4); matches=amClient.getMatchingRequests(priority,node,testCapability5); assert (matches.size() == 0); Resource testCapability7=Resource.newInstance(2000,1); matches=amClient.getMatchingRequests(priority2,ResourceRequest.ANY,testCapability7); assert (matches.size() == 0); matches=amClient.getMatchingRequests(priority2,node,testCapability7); assert (matches.size() == 1); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

    EqualityVerifier 
    /**
     * waitFor() must keep polling the supplied check until it reports done;
     * afterwards the CountDownSupplier's counter is expected to read 3.
     */
    @Test
    public void testWaitFor() throws InterruptedException {
      AMRMClientImpl client = null;
      CountDownSupplier supplier = new CountDownSupplier();
      try {
        client = (AMRMClientImpl) AMRMClient.createAMRMClient();
        client.init(new YarnConfiguration());
        client.start();
        client.waitFor(supplier, 1000);
        assertEquals(3, supplier.counter);
      } finally {
        if (client != null) {
          client.stop();
        }
      }
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that node blacklisting suppresses allocation: with the only
     * node blacklisted no containers are allocated; after removing the
     * blacklist entry, a second request yields 2 containers and both pending
     * blacklist sets drain. Finally, an invalid request (negative memory)
     * makes allocate() throw while a fresh blacklist addition is still
     * pending (blacklistAdditions holds 1 entry inside the catch).
     * NOTE(review): the broad catch(Exception) is deliberate — the test only
     * cares that allocate() fails, not which exception type it fails with.
     */
    @Test(timeout=60000) public void testAllocationWithBlacklist() throws YarnException, IOException { AMRMClientImpl amClient=null; try { amClient=(AMRMClientImpl)AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); assertEquals(0,amClient.ask.size()); assertEquals(0,amClient.release.size()); ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority); amClient.addContainerRequest(storedContainer1); assertEquals(3,amClient.ask.size()); assertEquals(0,amClient.release.size()); List localNodeBlacklist=new ArrayList(); localNodeBlacklist.add(node); amClient.updateBlacklist(localNodeBlacklist,null); int allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION); assertEquals(0,allocatedContainerCount); amClient.updateBlacklist(null,localNodeBlacklist); ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority); amClient.addContainerRequest(storedContainer2); allocatedContainerCount=getAllocatedContainersNumber(amClient,DEFAULT_ITERATION); assertEquals(2,allocatedContainerCount); assertTrue(amClient.blacklistAdditions.isEmpty()); assertTrue(amClient.blacklistRemovals.isEmpty()); ContainerRequest invalidContainerRequest=new ContainerRequest(Resource.newInstance(-1024,1),nodes,racks,priority); amClient.addContainerRequest(invalidContainerRequest); amClient.updateBlacklist(localNodeBlacklist,null); try { amClient.allocate(0.1f); fail("there should be an exception here."); } catch ( Exception e) { assertEquals(1,amClient.blacklistAdditions.size()); } } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Exercises blacklist bookkeeping in AMRMClientImpl: additions accumulate
     * across updates, removals cancel earlier additions, and re-adding a
     * removed node moves it back to the pending-addition set.
     */
    @Test(timeout = 60000)
    public void testAMRMClientWithBlacklist() throws YarnException, IOException {
      AMRMClientImpl amClient = null;
      try {
        amClient = (AMRMClientImpl) AMRMClient.createAMRMClient();
        amClient.init(conf);
        amClient.start();
        amClient.registerApplicationMaster("Host", 10000, "");
        String[] nodes = {"node1", "node2", "node3"};
        // Blacklist node1 and node2.
        List firstBatch = new ArrayList();
        firstBatch.add(nodes[0]);
        firstBatch.add(nodes[1]);
        amClient.updateBlacklist(firstBatch, null);
        assertEquals(2, amClient.blacklistAdditions.size());
        assertEquals(0, amClient.blacklistRemovals.size());
        // node1 is already pending; only node3 is new, so the count goes to 3.
        List secondBatch = new ArrayList();
        secondBatch.add(nodes[0]);
        secondBatch.add(nodes[2]);
        amClient.updateBlacklist(secondBatch, null);
        assertEquals(3, amClient.blacklistAdditions.size());
        assertEquals(0, amClient.blacklistRemovals.size());
        // Removing node2 and node3 cancels their pending additions.
        List removalBatch = new ArrayList();
        removalBatch.add(nodes[1]);
        removalBatch.add(nodes[2]);
        amClient.updateBlacklist(null, removalBatch);
        assertEquals(1, amClient.blacklistAdditions.size());
        assertEquals(2, amClient.blacklistRemovals.size());
        // Re-adding node2 moves it out of the pending-removal set again.
        List readdBatch = new ArrayList();
        readdBatch.add(nodes[1]);
        amClient.updateBlacklist(readdBatch, null);
        assertEquals(2, amClient.blacklistAdditions.size());
        assertEquals(1, amClient.blacklistRemovals.size());
      } finally {
        if (amClient != null && amClient.getServiceState() == STATE.STARTED) {
          amClient.stop();
        }
      }
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies AMRM token roll-over handling in the client: after
     * registration the client's token must match the secret manager's current
     * master key; the test then keeps heartbeating past the rolling interval
     * so the RM issues a new key, asserts the refreshed token differs from
     * the original and matches the new key, waits for the old key to be fully
     * retired, and finally proves that an RPC made with the stale token is
     * rejected with InvalidToken ("Invalid AMRMToken from ...").
     * NOTE(review): paced by Thread.sleep(1000) against the RM's configured
     * rolling interval — inherently timing-dependent.
     */
    @Test(timeout=60000) public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException { AMRMClient amClient=null; try { AMRMTokenSecretManager amrmTokenSecretManager=yarnCluster.getResourceManager().getRMContext().getAMRMTokenSecretManager(); amClient=AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); Long startTime=System.currentTimeMillis(); amClient.registerApplicationMaster("Host",10000,""); org.apache.hadoop.security.token.Token amrmToken_1=getAMRMToken(); Assert.assertNotNull(amrmToken_1); Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) { amClient.allocate(0.1f); try { Thread.sleep(1000); } catch ( InterruptedException e) { e.printStackTrace(); } } amClient.allocate(0.1f); org.apache.hadoop.security.token.Token amrmToken_2=getAMRMToken(); Assert.assertNotNull(amrmToken_2); Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(),amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); Assert.assertNotEquals(amrmToken_1,amrmToken_2); amClient.allocate(0.1f); while (true) { if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getCurrnetMasterKeyData().getMasterKey().getKeyId()) { if (amrmTokenSecretManager.getNextMasterKeyData() == null) { break; } else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey().getKeyId()) { break; } } amClient.allocate(0.1f); try { Thread.sleep(1000); } catch ( InterruptedException e) { } } try { UserGroupInformation testUser=UserGroupInformation.createRemoteUser("testUser"); SecurityUtil.setTokenService(amrmToken_2,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress()); testUser.addToken(amrmToken_2); testUser.doAs(new PrivilegedAction(){ @Override public ApplicationMasterProtocol run(){ return 
(ApplicationMasterProtocol)YarnRPC.create(conf).getProxy(ApplicationMasterProtocol.class,yarnCluster.getResourceManager().getApplicationMasterService().getBindAddress(),conf); } } ).allocate(Records.newRecord(AllocateRequest.class)); Assert.fail("The old Token should not work"); } catch ( Exception ex) { Assert.assertTrue(ex instanceof InvalidToken); Assert.assertTrue(ex.getMessage().contains("Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId())); } amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=60000) public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException { AMRMClientImpl amClient=null; try { amClient=new AMRMClientImpl(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); Resource capability=Resource.newInstance(1024,2); ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,null,priority); amClient.addContainerRequest(storedContainer1); List> matches; ContainerRequest storedRequest; matches=amClient.getMatchingRequests(priority,node,capability); verifyMatches(matches,1); storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); matches=amClient.getMatchingRequests(priority,rack,capability); verifyMatches(matches,1); storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); amClient.removeContainerRequest(storedContainer1); matches=amClient.getMatchingRequests(priority,rack,capability); assertTrue(matches.isEmpty()); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * Exercises the client's internal request storage: three requests at two
     * priorities are added and the remoteRequestsTable container counts are
     * checked (2 at {@code priority}, 1 at {@code priority1});
     * getMatchingRequests() is probed by node, rack and ANY as requests are
     * removed one by one until the table is empty. The surviving requests are
     * then re-added, real containers are allocated (up to 3 heartbeats),
     * each allocated container is matched back to its stored request by
     * priority, removed, and released; a final allocate must return nothing
     * and leave ask/release and the request table empty.
     */
    @Test public void testAMRMClientMatchStorage() throws YarnException, IOException { AMRMClientImpl amClient=null; try { amClient=(AMRMClientImpl)AMRMClient.createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); Priority priority1=Records.newRecord(Priority.class); priority1.setPriority(2); ContainerRequest storedContainer1=new ContainerRequest(capability,nodes,racks,priority); ContainerRequest storedContainer2=new ContainerRequest(capability,nodes,racks,priority); ContainerRequest storedContainer3=new ContainerRequest(capability,null,null,priority1); amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer2); amClient.addContainerRequest(storedContainer3); int containersRequestedAny=amClient.remoteRequestsTable.get(priority).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers(); assertEquals(2,containersRequestedAny); containersRequestedAny=amClient.remoteRequestsTable.get(priority1).get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers(); assertEquals(1,containersRequestedAny); List> matches=amClient.getMatchingRequests(priority,node,capability); verifyMatches(matches,2); matches=amClient.getMatchingRequests(priority,rack,capability); verifyMatches(matches,2); matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability); verifyMatches(matches,2); matches=amClient.getMatchingRequests(priority1,rack,capability); assertTrue(matches.isEmpty()); matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability); verifyMatches(matches,1); amClient.removeContainerRequest(storedContainer3); matches=amClient.getMatchingRequests(priority,node,capability); verifyMatches(matches,2); amClient.removeContainerRequest(storedContainer2); matches=amClient.getMatchingRequests(priority,node,capability); verifyMatches(matches,1); matches=amClient.getMatchingRequests(priority,rack,capability); verifyMatches(matches,1); 
    // Last request standing must be storedContainer1; removing it empties the table.
ContainerRequest storedRequest=matches.get(0).iterator().next(); assertEquals(storedContainer1,storedRequest); amClient.removeContainerRequest(storedContainer1); matches=amClient.getMatchingRequests(priority,ResourceRequest.ANY,capability); assertTrue(matches.isEmpty()); matches=amClient.getMatchingRequests(priority1,ResourceRequest.ANY,capability); assertTrue(matches.isEmpty()); assertTrue(amClient.remoteRequestsTable.isEmpty()); amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer3); int allocatedContainerCount=0; int iterationsLeft=3; while (allocatedContainerCount < 2 && iterationsLeft-- > 0) { Log.info(" == alloc " + allocatedContainerCount + " it left "+ iterationsLeft); AllocateResponse allocResponse=amClient.allocate(0.1f); assertEquals(0,amClient.ask.size()); assertEquals(0,amClient.release.size()); assertEquals(nodeCount,amClient.getClusterNodeCount()); allocatedContainerCount+=allocResponse.getAllocatedContainers().size(); for ( Container container : allocResponse.getAllocatedContainers()) { ContainerRequest expectedRequest=container.getPriority().equals(storedContainer1.getPriority()) ? 
storedContainer1 : storedContainer3; matches=amClient.getMatchingRequests(container.getPriority(),ResourceRequest.ANY,container.getResource()); verifyMatches(matches,1); ContainerRequest matchedRequest=matches.get(0).iterator().next(); assertEquals(matchedRequest,expectedRequest); amClient.removeContainerRequest(matchedRequest); amClient.releaseAssignedContainer(container.getId()); } if (allocatedContainerCount < containersRequestedAny) { sleep(100); } } assertEquals(2,allocatedContainerCount); AllocateResponse allocResponse=amClient.allocate(0.1f); assertEquals(0,amClient.release.size()); assertEquals(0,amClient.ask.size()); assertEquals(0,allocResponse.getAllocatedContainers().size()); assertTrue(amClient.remoteRequestsTable.isEmpty()); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } }

    Class: org.apache.hadoop.yarn.client.api.impl.TestAMRMClientOnRMRestart

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=60000) public void testAMRMClientResendsRequestsOnRMRestart() throws Exception { UserGroupInformation.setLoginUser(null); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MyResourceManager rm1=new MyResourceManager(conf,memStore); rm1.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher(); RMApp app=rm1.submitApp(1024); dispatcher.await(); MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm1.sendAMLaunched(appAttemptId); dispatcher.await(); org.apache.hadoop.security.token.Token token=rm1.getRMContext().getRMApps().get(appAttemptId.getApplicationId()).getRMAppAttempt(appAttemptId).getAMRMToken(); UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); ugi.addTokenIdentifier(token.decodeIdentifier()); AMRMClient amClient=new MyAMRMClientImpl(rm1); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host",10000,""); ContainerRequest cRequest1=createReq(1,1024,new String[]{"h1"}); amClient.addContainerRequest(cRequest1); ContainerRequest cRequest2=createReq(1,1024,new String[]{"h1","h2"}); amClient.addContainerRequest(cRequest2); List blacklistAdditions=new ArrayList(); List blacklistRemoval=new ArrayList(); blacklistAdditions.add("h2"); blacklistRemoval.add("h10"); amClient.updateBlacklist(blacklistAdditions,blacklistRemoval); blacklistAdditions.remove("h2"); AllocateResponse allocateResponse=amClient.allocate(0.1f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size()); assertAsksAndReleases(4,0,rm1); assertBlacklistAdditionsAndRemovals(1,1,rm1); nm1.nodeHeartbeat(true); dispatcher.await(); allocateResponse=amClient.allocate(0.2f); dispatcher.await(); Assert.assertEquals("No of assignments must be 
0",2,allocateResponse.getAllocatedContainers().size()); assertAsksAndReleases(0,0,rm1); assertBlacklistAdditionsAndRemovals(0,0,rm1); List allocatedContainers=allocateResponse.getAllocatedContainers(); amClient.removeContainerRequest(cRequest1); amClient.removeContainerRequest(cRequest2); allocateResponse=amClient.allocate(0.2f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size()); assertAsksAndReleases(4,0,rm1); assertBlacklistAdditionsAndRemovals(0,0,rm1); ContainerRequest cRequest3=createReq(1,1024,new String[]{"h1"}); amClient.addContainerRequest(cRequest3); int pendingRelease=0; Iterator it=allocatedContainers.iterator(); while (it.hasNext()) { amClient.releaseAssignedContainer(it.next().getId()); pendingRelease++; it.remove(); break; } allocateResponse=amClient.allocate(0.3f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size()); assertAsksAndReleases(3,pendingRelease,rm1); assertBlacklistAdditionsAndRemovals(0,0,rm1); int completedContainer=allocateResponse.getCompletedContainersStatuses().size(); pendingRelease-=completedContainer; MyResourceManager rm2=new MyResourceManager(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); ((MyAMRMClientImpl)amClient).updateRMProxy(rm2); dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher(); NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); dispatcher.await(); blacklistAdditions.add("h3"); amClient.updateBlacklist(blacklistAdditions,null); blacklistAdditions.remove("h3"); it=allocatedContainers.iterator(); while (it.hasNext()) { amClient.releaseAssignedContainer(it.next().getId()); pendingRelease++; it.remove(); } ContainerRequest 
cRequest4=createReq(1,1024,new String[]{"h1","h2"}); amClient.addContainerRequest(cRequest4); allocateResponse=amClient.allocate(0.3f); dispatcher.await(); completedContainer=allocateResponse.getCompletedContainersStatuses().size(); pendingRelease-=completedContainer; assertAsksAndReleases(4,pendingRelease,rm2); assertBlacklistAdditionsAndRemovals(2,0,rm2); ContainerRequest cRequest5=createReq(1,1024,new String[]{"h1","h2","h3"}); amClient.addContainerRequest(cRequest5); allocateResponse=amClient.allocate(0.5f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0",0,allocateResponse.getAllocatedContainers().size()); assertAsksAndReleases(5,0,rm2); assertBlacklistAdditionsAndRemovals(0,0,rm2); int noAssignedContainer=0; int count=5; while (count-- > 0) { nm1.nodeHeartbeat(true); dispatcher.await(); allocateResponse=amClient.allocate(0.5f); dispatcher.await(); noAssignedContainer+=allocateResponse.getAllocatedContainers().size(); if (noAssignedContainer == 3) { break; } Thread.sleep(1000); } Assert.assertEquals("Number of container should be 3",3,noAssignedContainer); amClient.stop(); rm1.stop(); rm2.stop(); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies AM unregistration across an RM restart: the AM registers
     * against rm1, rm2 is started from the same MemoryRMStateStore, the NM is
     * told to RESYNC and re-registers reporting the AM container as RUNNING
     * so the attempt is recovered; unregistering through the client must then
     * drive the attempt on rm2 through FINISHING to FINISHED (after the
     * container completes) and the application to FINISHED.
     */
    @Test(timeout=60000) public void testAMRMClientForUnregisterAMOnRMRestart() throws Exception { MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MyResourceManager rm1=new MyResourceManager(conf,memStore); rm1.start(); DrainDispatcher dispatcher=(DrainDispatcher)rm1.getRMContext().getDispatcher(); RMApp app=rm1.submitApp(1024); dispatcher.await(); MockNM nm1=new MockNM("h1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); dispatcher.await(); ApplicationAttemptId appAttemptId=app.getCurrentAppAttempt().getAppAttemptId(); rm1.sendAMLaunched(appAttemptId); dispatcher.await(); org.apache.hadoop.security.token.Token token=rm1.getRMContext().getRMApps().get(appAttemptId.getApplicationId()).getRMAppAttempt(appAttemptId).getAMRMToken(); UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); ugi.addTokenIdentifier(token.decodeIdentifier()); AMRMClient amClient=new MyAMRMClientImpl(rm1); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("h1",10000,""); amClient.allocate(0.1f); MyResourceManager rm2=new MyResourceManager(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); ((MyAMRMClientImpl)amClient).updateRMProxy(rm2); dispatcher=(DrainDispatcher)rm2.getRMContext().getDispatcher(); NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); nm1=new MockNM("h1:1234",10240,rm2.getResourceTrackerService()); ContainerId containerId=ContainerId.newInstance(appAttemptId,1); NMContainerStatus containerReport=NMContainerStatus.newInstance(containerId,ContainerState.RUNNING,Resource.newInstance(1024,1),"recover container",0,Priority.newInstance(0),0); nm1.registerNode(Arrays.asList(containerReport),null); nm1.nodeHeartbeat(true); dispatcher.await(); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null); 
    // Unregister drives the recovered attempt to FINISHING; completing the
    // AM container finishes both the attempt and the application on rm2.
rm2.waitForState(appAttemptId,RMAppAttemptState.FINISHING); nm1.nodeHeartbeat(appAttemptId,1,ContainerState.COMPLETE); rm2.waitForState(appAttemptId,RMAppAttemptState.FINISHED); rm2.waitForState(app.getApplicationId(),RMAppState.FINISHED); amClient.stop(); rm1.stop(); rm2.stop(); }

    Class: org.apache.hadoop.yarn.client.api.impl.TestNMClient

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * When the NMClient is stopped with cleanup disabled, the containers it
     * started must survive the stop; an explicit
     * cleanupRunningContainers() call then empties the started-container set.
     */
    @Test(timeout=180000)
    public void testNMClientNoCleanupOnStop() throws YarnException, IOException {
      rmClient.registerApplicationMaster("Host",10000,"");
      testContainerManagement(nmClient,allocateContainers(rmClient,5));
      rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,null,null);
      // Stop without cleanup; started containers must still be tracked.
      stopNmClient(false);
      assertFalse(nmClient.startedContainers.isEmpty());
      // Manual cleanup removes everything.
      nmClient.cleanupRunningContainers();
      assertEquals(0,nmClient.startedContainers.size());
    }

    BranchVerifier TestInitializer UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Brings up a MiniYARNCluster, submits an unmanaged AM, waits (up to
     * ~30s) for its attempt to reach LAUNCHED, and initializes the
     * AMRM/NM clients shared by the tests.
     *
     * Fixes over the previous version: the inner wait no longer busy-spins
     * (it sleeps between polls), the failure message typo ("bee") is
     * corrected, and an unused SubmitApplicationRequest local was removed
     * (the context is submitted directly via yarnClient).
     */
    @Before
    public void setup() throws YarnException, IOException {
      conf=new YarnConfiguration();
      // Mini cluster with `nodeCount` NMs, one local dir and one log dir.
      yarnCluster=new MiniYARNCluster(TestAMRMClient.class.getName(),nodeCount,1,1);
      yarnCluster.init(conf);
      yarnCluster.start();
      assertNotNull(yarnCluster);
      assertEquals(STATE.STARTED,yarnCluster.getServiceState());
      yarnClient=(YarnClientImpl)YarnClient.createYarnClient();
      yarnClient.init(conf);
      yarnClient.start();
      assertNotNull(yarnClient);
      assertEquals(STATE.STARTED,yarnClient.getServiceState());
      nodeReports=yarnClient.getNodeReports(NodeState.RUNNING);
      // Build and submit an unmanaged-AM application on the default queue.
      ApplicationSubmissionContext appContext=yarnClient.createApplication().getApplicationSubmissionContext();
      ApplicationId appId=appContext.getApplicationId();
      appContext.setApplicationName("Test");
      Priority pri=Priority.newInstance(0);
      appContext.setPriority(pri);
      appContext.setQueue("default");
      ContainerLaunchContext amContainer=Records.newRecord(ContainerLaunchContext.class);
      appContext.setAMContainerSpec(amContainer);
      appContext.setUnmanagedAM(true);
      yarnClient.submitApplication(appContext);
      // Poll until the app is ACCEPTED, then until its attempt is LAUNCHED.
      int iterationsLeft=30;
      RMAppAttempt appAttempt=null;
      while (iterationsLeft > 0) {
        ApplicationReport appReport=yarnClient.getApplicationReport(appId);
        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
          attemptId=appReport.getCurrentApplicationAttemptId();
          appAttempt=yarnCluster.getResourceManager().getRMContext().getRMApps().get(attemptId.getApplicationId()).getCurrentAppAttempt();
          // Sleep between polls instead of busy-spinning on the CPU.
          while (appAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
            sleep(100);
          }
          break;
        }
        sleep(1000);
        --iterationsLeft;
      }
      if (iterationsLeft == 0) {
        fail("Application hasn't been started");
      }
      // Run the rest of the test as a remote (simple-auth) user and attach
      // the attempt's AMRM token to it.
      UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
      UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());
      // AMRM client with a dedicated NM token cache shared with the NM client.
      nmTokenCache=new NMTokenCache();
      rmClient=(AMRMClientImpl)AMRMClient.createAMRMClient();
      rmClient.setNMTokenCache(nmTokenCache);
      rmClient.init(conf);
      rmClient.start();
      assertNotNull(rmClient);
      assertEquals(STATE.STARTED,rmClient.getServiceState());
      nmClient=(NMClientImpl)NMClient.createNMClient();
      nmClient.setNMTokenCache(rmClient.getNMTokenCache());
      nmClient.init(conf);
      nmClient.start();
      assertNotNull(nmClient);
      assertEquals(STATE.STARTED,nmClient.getServiceState());
    }

    Class: org.apache.hadoop.yarn.client.api.impl.TestTimelineClient

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /** A successful put of a single entity must come back with zero errors. */
    @Test
    public void testPostEntities() throws Exception {
      mockClientResponse(client, ClientResponse.Status.OK, false, false);
      try {
        TimelinePutResponse putResponse = client.putEntities(generateEntity());
        int errorCount = putResponse.getErrors().size();
        Assert.assertEquals(0, errorCount);
      } catch (YarnException unexpected) {
        Assert.fail("Exception is not expected");
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * When the mocked response carries a put error, putEntities() must
     * surface exactly one error with the expected id, type and error code.
     */
    @Test
    public void testPostEntitiesWithError() throws Exception {
      mockClientResponse(client, ClientResponse.Status.OK, true, false);
      try {
        TimelinePutResponse putResponse = client.putEntities(generateEntity());
        Assert.assertEquals(1, putResponse.getErrors().size());
        TimelinePutResponse.TimelinePutError firstError = putResponse.getErrors().get(0);
        Assert.assertEquals("test entity id", firstError.getEntityId());
        Assert.assertEquals("test entity type", firstError.getEntityType());
        Assert.assertEquals(TimelinePutResponse.TimelinePutError.IO_EXCEPTION, firstError.getErrorCode());
      } catch (YarnException unexpected) {
        Assert.fail("Exception is not expected");
      }
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * With TIMELINE_SERVICE_ENABLED unset (default off), putEntities() must
     * short-circuit to an empty response instead of hitting the (failing)
     * mocked server.
     */
    @Test
    public void testPostEntitiesTimelineServiceDefaultNotEnabled() throws Exception {
      YarnConfiguration conf = new YarnConfiguration();
      conf.unset(YarnConfiguration.TIMELINE_SERVICE_ENABLED);
      TimelineClientImpl tlClient = createTimelineClient(conf);
      // Server would answer 500 — the client must never reach it.
      mockClientResponse(tlClient, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
      try {
        TimelinePutResponse putResponse = tlClient.putEntities(generateEntity());
        Assert.assertEquals(0, putResponse.getErrors().size());
      } catch (YarnException unexpected) {
        Assert.fail("putEntities should already return before throwing the exception");
      }
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * With TIMELINE_SERVICE_ENABLED explicitly false, putEntities() must
     * short-circuit to an empty response instead of hitting the (failing)
     * mocked server.
     */
    @Test
    public void testPostEntitiesTimelineServiceNotEnabled() throws Exception {
      YarnConfiguration conf = new YarnConfiguration();
      conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
      TimelineClientImpl tlClient = createTimelineClient(conf);
      // Server would answer 500 — the client must never reach it.
      mockClientResponse(tlClient, ClientResponse.Status.INTERNAL_SERVER_ERROR, false, false);
      try {
        TimelinePutResponse putResponse = tlClient.putEntities(generateEntity());
        Assert.assertEquals(0, putResponse.getErrors().size());
      } catch (YarnException unexpected) {
        Assert.fail("putEntities should already return before throwing the exception");
      }
    }

    Class: org.apache.hadoop.yarn.client.api.impl.TestYarnClient

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * getApplicationAttempts() on the mock client returns attempt reports
     * whose ids are attempt 1 and attempt 2 of the queried application.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), which matters for failure messages.
     */
    @Test(timeout=10000)
    public void testGetApplicationAttempts() throws YarnException, IOException {
      Configuration conf=new Configuration();
      final YarnClient client=new MockYarnClient();
      client.init(conf);
      client.start();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      List reports=client.getApplicationAttempts(applicationId);
      Assert.assertNotNull(reports);
      Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId,1),reports.get(0).getApplicationAttemptId());
      Assert.assertEquals(ApplicationAttemptId.newInstance(applicationId,2),reports.get(1).getApplicationAttemptId());
      client.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * getContainers() on the mock client returns reports for containers 1
     * and 2 of the queried attempt.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), which matters for failure messages.
     */
    @Test(timeout=10000)
    public void testGetContainers() throws YarnException, IOException {
      Configuration conf=new Configuration();
      final YarnClient client=new MockYarnClient();
      client.init(conf);
      client.start();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
      List reports=client.getContainers(appAttemptId);
      Assert.assertNotNull(reports);
      Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),reports.get(0).getContainerId());
      Assert.assertEquals(ContainerId.newInstance(appAttemptId,2),reports.get(1).getContainerId());
      client.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * An app submitted without a type defaults to "YARN"; an explicitly
     * supplied type ("MAPREDUCE") is preserved verbatim.
     */
    @Test(timeout=30000)
    public void testApplicationType() throws Exception {
      Logger rootLogger = LogManager.getRootLogger();
      rootLogger.setLevel(Level.DEBUG);
      MockRM resourceManager = new MockRM();
      resourceManager.start();
      RMApp defaultTypeApp = resourceManager.submitApp(2000);
      RMApp mapReduceApp = resourceManager.submitApp(200, "name", "user", new HashMap(), false, "default", -1, null, "MAPREDUCE");
      Assert.assertEquals("YARN", defaultTypeApp.getApplicationType());
      Assert.assertEquals("MAPREDUCE", mapReduceApp.getApplicationType());
      resourceManager.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises MockYarnClient.getApplications() filtering: no filter,
     * by application type, by application state, and by both at once.
     * NOTE(review): relies on the mock's canned report set containing the
     * types YARN / NON-YARN / NON-MAPREDUCE — confirm against the
     * MockYarnClient definition (not visible here).
     */
    @Test(timeout=10000)
    public void testGetApplications() throws YarnException, IOException {
      Configuration conf=new Configuration();
      final YarnClient client=new MockYarnClient();
      client.init(conf);
      client.start();
      // Unfiltered query returns exactly the mock's canned reports.
      List expectedReports=((MockYarnClient)client).getReports();
      List reports=client.getApplications();
      Assert.assertEquals(reports,expectedReports);
      // Filter by application type only; order of the two hits is unspecified.
      Set appTypes=new HashSet();
      appTypes.add("YARN");
      appTypes.add("NON-YARN");
      reports=client.getApplications(appTypes,null);
      Assert.assertEquals(reports.size(),2);
      Assert.assertTrue((reports.get(0).getApplicationType().equals("YARN") && reports.get(1).getApplicationType().equals("NON-YARN")) || (reports.get(1).getApplicationType().equals("YARN") && reports.get(0).getApplicationType().equals("NON-YARN")));
      for ( ApplicationReport report : reports) {
        Assert.assertTrue(expectedReports.contains(report));
      }
      // Filter by application state only.
      EnumSet appStates=EnumSet.noneOf(YarnApplicationState.class);
      appStates.add(YarnApplicationState.FINISHED);
      appStates.add(YarnApplicationState.FAILED);
      reports=client.getApplications(null,appStates);
      Assert.assertEquals(reports.size(),2);
      Assert.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN") && reports.get(1).getApplicationType().equals("NON-MAPREDUCE")) || (reports.get(1).getApplicationType().equals("NON-YARN") && reports.get(0).getApplicationType().equals("NON-MAPREDUCE")));
      for ( ApplicationReport report : reports) {
        Assert.assertTrue(expectedReports.contains(report));
      }
      // Filter by both type and state; only NON-YARN satisfies both.
      reports=client.getApplications(appTypes,appStates);
      Assert.assertEquals(reports.size(),1);
      Assert.assertTrue((reports.get(0).getApplicationType().equals("NON-YARN")));
      for ( ApplicationReport report : reports) {
        Assert.assertTrue(expectedReports.contains(report));
      }
      client.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * With the timeline service enabled and Kerberos auth configured,
     * submitApplication() must automatically obtain a timeline delegation
     * token and write it into the app's credentials.  Uses an anonymous
     * YarnClientImpl whose timeline client and RM protocol are mocks.
     */
    @Test
    public void testAutomaticTimelineDelegationTokenLoading() throws Exception {
      Configuration conf=new YarnConfiguration();
      conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,true);
      SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,conf);
      // NOTE(review): raw Token type as in source — generic parameters appear
      // to have been lost in transcription.
      final Token dToken=new Token();
      YarnClientImpl client=new YarnClientImpl(){
        @Override
        protected void serviceInit( Configuration conf) throws Exception {
          if (getConfig().getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
            timelineServiceEnabled=true;
            // Mocked timeline client always hands back dToken.
            timelineClient=mock(TimelineClient.class);
            when(timelineClient.getDelegationToken(any(String.class))).thenReturn(dToken);
            timelineClient.init(getConfig());
            timelineService=TimelineUtils.buildTimelineTokenService(getConfig());
          }
          this.setConfig(conf);
        }
        @Override
        protected void serviceStart() throws Exception {
          // Stub out the RM protocol; no real RPC is made.
          rmClient=mock(ApplicationClientProtocol.class);
        }
        @Override
        protected void serviceStop() throws Exception {
        }
        @Override
        public ApplicationReport getApplicationReport( ApplicationId appId){
          // Always report SUBMITTED so submitApplication() completes.
          ApplicationReport report=mock(ApplicationReport.class);
          when(report.getYarnApplicationState()).thenReturn(YarnApplicationState.SUBMITTED);
          return report;
        }
        @Override
        public boolean isSecurityEnabled(){
          // Force the secure path so the delegation token is fetched.
          return true;
        }
      }
      ;
      client.init(conf);
      client.start();
      ApplicationSubmissionContext context=mock(ApplicationSubmissionContext.class);
      ApplicationId applicationId=ApplicationId.newInstance(0,1);
      when(context.getApplicationId()).thenReturn(applicationId);
      // Start with an empty credentials blob in the AM launch context.
      DataOutputBuffer dob=new DataOutputBuffer();
      Credentials credentials=new Credentials();
      credentials.writeTokenStorageToStream(dob);
      ByteBuffer tokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength());
      ContainerLaunchContext clc=ContainerLaunchContext.newInstance(null,null,null,null,tokens,null);
      when(context.getAMContainerSpec()).thenReturn(clc);
      client.submitApplication(context);
      // Read back the credentials the client wrote into the launch context.
      credentials=new Credentials();
      DataInputByteBuffer dibb=new DataInputByteBuffer();
      tokens=clc.getTokens();
      if (tokens != null) {
        dibb.reset(tokens);
        credentials.readTokenStorageStream(dibb);
        tokens.rewind();
      }
      // NOTE(review): "Collection>" is as in source — generics appear to have
      // been lost; presumably Collection<Token<?>>.  Exactly the mocked
      // delegation token must be present.
      Collection> dTokens=credentials.getAllTokens();
      Assert.assertEquals(1,dTokens.size());
      Assert.assertEquals(dToken,dTokens.iterator().next());
      client.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * An application type longer than the limit is truncated to 20
     * characters ("MAPREDUCE-LENGTH-IS-20" -> "MAPREDUCE-LENGTH-IS-").
     */
    @Test(timeout=30000)
    public void testApplicationTypeLimit() throws Exception {
      Logger rootLogger = LogManager.getRootLogger();
      rootLogger.setLevel(Level.DEBUG);
      MockRM resourceManager = new MockRM();
      resourceManager.start();
      RMApp longTypeApp = resourceManager.submitApp(200, "name", "user", new HashMap(), false, "default", -1, null, "MAPREDUCE-LENGTH-IS-20");
      Assert.assertEquals("MAPREDUCE-LENGTH-IS-", longTypeApp.getApplicationType());
      resourceManager.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * getApplicationAttemptReport() on the mock client returns the attempt
     * matching the first canned application report's current attempt id.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), which matters for failure messages.
     */
    @Test(timeout=10000)
    public void testGetApplicationAttempt() throws YarnException, IOException {
      Configuration conf=new Configuration();
      final YarnClient client=new MockYarnClient();
      client.init(conf);
      client.start();
      List expectedReports=((MockYarnClient)client).getReports();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
      ApplicationAttemptReport report=client.getApplicationAttemptReport(appAttemptId);
      Assert.assertNotNull(report);
      Assert.assertEquals(expectedReports.get(0).getCurrentApplicationAttemptId().toString(),report.getApplicationAttemptId().toString());
      client.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * getContainerReport() on the mock client returns the report for
     * container 1 of the first canned application's current attempt.
     *
     * Fix: assertEquals arguments were reversed (actual, expected); JUnit's
     * contract is (expected, actual), which matters for failure messages.
     */
    @Test(timeout=10000)
    public void testGetContainerReport() throws YarnException, IOException {
      Configuration conf=new Configuration();
      final YarnClient client=new MockYarnClient();
      client.init(conf);
      client.start();
      List expectedReports=((MockYarnClient)client).getReports();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,1);
      ContainerId containerId=ContainerId.newInstance(appAttemptId,1);
      ContainerReport report=client.getContainerReport(containerId);
      Assert.assertNotNull(report);
      Assert.assertEquals((ContainerId.newInstance(expectedReports.get(0).getCurrentApplicationAttemptId(),1)).toString(),report.getContainerId().toString());
      client.stop();
    }

    Class: org.apache.hadoop.yarn.client.cli.TestLogsCLI

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Running "yarn logs" with no arguments must fail (-1) and print the
     * usage/help text.  The expected text is rebuilt with a PrintWriter so
     * platform line separators match the CLI's own output.
     *
     * Fixes: uppercase long-literal suffix (5000L, not the easily misread
     * 5000l) and assertEquals instead of assertTrue(x == -1) for a
     * meaningful failure message.
     * NOTE(review): the expected help strings look like their original
     * alignment/placeholder text may have been collapsed in this copy of the
     * source — they are kept byte-identical here; verify against the CLI.
     */
    @Test(timeout=5000L)
    public void testHelpMessage() throws Exception {
      Configuration conf=new YarnConfiguration();
      YarnClient mockYarnClient=createMockYarnClient(YarnApplicationState.FINISHED);
      LogsCLI dumper=new LogsCLIForTest(mockYarnClient);
      dumper.setConf(conf);
      int exitCode=dumper.run(new String[]{});
      assertEquals(-1,exitCode);
      ByteArrayOutputStream baos=new ByteArrayOutputStream();
      PrintWriter pw=new PrintWriter(baos);
      pw.println("Retrieve logs for completed YARN applications.");
      pw.println("usage: yarn logs -applicationId [OPTIONS]");
      pw.println();
      pw.println("general options are:");
      pw.println(" -appOwner AppOwner (assumed to be current user if");
      pw.println(" not specified)");
      pw.println(" -containerId ContainerId (must be specified if node");
      pw.println(" address is specified)");
      pw.println(" -nodeAddress NodeAddress in the format nodename:port");
      pw.println(" (must be specified if container id is");
      pw.println(" specified)");
      pw.close();
      String appReportStr=baos.toString("UTF-8");
      Assert.assertEquals(appReportStr,sysOutStream.toString());
    }

    Class: org.apache.hadoop.yarn.client.cli.TestYarnCLI

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * "applicationattempt -list <appId>" prints a two-row attempt table.
     * The expected output is rebuilt with a PrintWriter so platform line
     * separators match the CLI's.
     * NOTE(review): column padding in the expected strings may have been
     * collapsed in this copy of the source — kept byte-identical here.
     */
    @Test
    public void testGetApplicationAttempts() throws Exception {
      ApplicationCLI cli=createAndGetAppCLI();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1);
      ApplicationAttemptId attemptId1=ApplicationAttemptId.newInstance(applicationId,2);
      ApplicationAttemptReport attemptReport=ApplicationAttemptReport.newInstance(attemptId,"host",124,"url","diagnostics",YarnApplicationAttemptState.FINISHED,ContainerId.newInstance(attemptId,1));
      ApplicationAttemptReport attemptReport1=ApplicationAttemptReport.newInstance(attemptId1,"host",124,"url","diagnostics",YarnApplicationAttemptState.FINISHED,ContainerId.newInstance(attemptId1,1));
      List reports=new ArrayList();
      reports.add(attemptReport);
      reports.add(attemptReport1);
      when(client.getApplicationAttempts(any(ApplicationId.class))).thenReturn(reports);
      int result=cli.run(new String[]{"applicationattempt","-list",applicationId.toString()});
      assertEquals(0,result);
      verify(client).getApplicationAttempts(applicationId);
      // Expected table, two attempts, FINISHED, with their AM container ids.
      ByteArrayOutputStream baos=new ByteArrayOutputStream();
      PrintWriter pw=new PrintWriter(baos);
      pw.println("Total number of application attempts :2");
      pw.print(" ApplicationAttempt-Id");
      pw.print("\t State");
      pw.print("\t AM-Container-Id");
      pw.println("\t Tracking-URL");
      pw.print(" appattempt_1234_0005_000001");
      pw.print("\t FINISHED");
      pw.print("\t container_1234_0005_01_000001");
      pw.println("\t url");
      pw.print(" appattempt_1234_0005_000002");
      pw.print("\t FINISHED");
      pw.print("\t container_1234_0005_02_000001");
      pw.println("\t url");
      pw.close();
      String appReportStr=baos.toString("UTF-8");
      Assert.assertEquals(appReportStr,sysOutStream.toString());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * "container -list <attemptId>" prints a two-row container table.
     * The expected output is rebuilt with a PrintWriter so platform line
     * separators match the CLI's.
     * NOTE(review): column padding in the expected strings may have been
     * collapsed in this copy of the source — kept byte-identical here.
     */
    @Test
    public void testGetContainers() throws Exception {
      ApplicationCLI cli=createAndGetAppCLI();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1);
      ContainerId containerId=ContainerId.newInstance(attemptId,1);
      ContainerId containerId1=ContainerId.newInstance(attemptId,2);
      ContainerReport container=ContainerReport.newInstance(containerId,null,NodeId.newInstance("host",1234),Priority.UNDEFINED,1234,5678,"diagnosticInfo","logURL",0,ContainerState.COMPLETE);
      ContainerReport container1=ContainerReport.newInstance(containerId1,null,NodeId.newInstance("host",1234),Priority.UNDEFINED,1234,5678,"diagnosticInfo","logURL",0,ContainerState.COMPLETE);
      List reports=new ArrayList();
      reports.add(container);
      reports.add(container1);
      when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(reports);
      int result=cli.run(new String[]{"container","-list",attemptId.toString()});
      assertEquals(0,result);
      verify(client).getContainers(attemptId);
      // Echo the CLI's actual output for debugging before comparing.
      Log.info(sysOutStream.toString());
      // Expected table, two COMPLETE containers on host:1234.
      ByteArrayOutputStream baos=new ByteArrayOutputStream();
      PrintWriter pw=new PrintWriter(baos);
      pw.println("Total number of containers :2");
      pw.print(" Container-Id");
      pw.print("\t Start Time");
      pw.print("\t Finish Time");
      pw.print("\t State");
      pw.print("\t Host");
      pw.println("\t LOG-URL");
      pw.print(" container_1234_0005_01_000001");
      pw.print("\t 1234");
      pw.print("\t 5678");
      pw.print("\t COMPLETE");
      pw.print("\t host:1234");
      pw.println("\t logURL");
      pw.print(" container_1234_0005_01_000002");
      pw.print("\t 1234");
      pw.print("\t 5678");
      pw.print("\t COMPLETE");
      pw.print("\t host:1234");
      pw.println("\t logURL");
      pw.close();
      String appReportStr=baos.toString("UTF-8");
      Assert.assertEquals(appReportStr,sysOutStream.toString());
    }

    EqualityVerifier 
    /** Running the node CLI with no arguments prints its help text. */
    @Test(timeout=5000)
    public void testNodesHelpCommand() throws Exception {
      NodeCLI nodeCli = new NodeCLI();
      nodeCli.setClient(client);
      nodeCli.setSysOutPrintStream(sysOut);
      nodeCli.setSysErrPrintStream(sysErr);
      String[] noArgs = {};
      nodeCli.run(noArgs);
      Assert.assertEquals(createNodeCLIHelpMessage(), sysOutStream.toString());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Each "-status" sub-command invoked without its required argument must
     * return -1 and print "Missing argument for options" followed by the
     * sub-command's help text.
     *
     * Fix: assertEquals arguments were reversed (actual, expected) on the
     * result-code checks; JUnit's contract is (expected, actual).
     */
    @Test
    public void testMissingArguments() throws Exception {
      ApplicationCLI cli=createAndGetAppCLI();
      int result=cli.run(new String[]{"application","-status"});
      Assert.assertEquals(-1,result);
      Assert.assertEquals(String.format("Missing argument for options%n%1s",createApplicationCLIHelpMessage()),sysOutStream.toString());
      sysOutStream.reset();
      result=cli.run(new String[]{"applicationattempt","-status"});
      Assert.assertEquals(-1,result);
      Assert.assertEquals(String.format("Missing argument for options%n%1s",createApplicationAttemptCLIHelpMessage()),sysOutStream.toString());
      sysOutStream.reset();
      result=cli.run(new String[]{"container","-status"});
      Assert.assertEquals(-1,result);
      Assert.assertEquals(String.format("Missing argument for options%n%1s",createContainerCLIHelpMessage()),sysOutStream.toString());
      sysOutStream.reset();
      // Same contract for the node CLI.
      NodeCLI nodeCLI=new NodeCLI();
      nodeCLI.setClient(client);
      nodeCLI.setSysOutPrintStream(sysOut);
      nodeCLI.setSysErrPrintStream(sysErr);
      result=nodeCLI.run(new String[]{"-status"});
      Assert.assertEquals(-1,result);
      Assert.assertEquals(String.format("Missing argument for options%n%1s",createNodeCLIHelpMessage()),sysOutStream.toString());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * "application -help" and malformed "application" invocations (extra
     * trailing argument) must print the application CLI usage text.
     * NOTE(review): the return codes of the malformed invocations are
     * captured but never asserted — possibly intentional, verify.
     */
    @Test(timeout=10000)
    public void testAppsHelpCommand() throws Exception {
      ApplicationCLI cli=createAndGetAppCLI();
      ApplicationCLI spyCli=spy(cli);
      int result=spyCli.run(new String[]{"application","-help"});
      Assert.assertTrue(result == 0);
      verify(spyCli).printUsage(any(String.class),any(Options.class));
      Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
      sysOutStream.reset();
      // -kill with a trailing extra argument: usage text is printed.
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      result=cli.run(new String[]{"application","-kill",applicationId.toString(),"args"});
      verify(spyCli).printUsage(any(String.class),any(Options.class));
      Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
      sysOutStream.reset();
      // -status with a trailing extra argument: usage text is printed.
      NodeId nodeId=NodeId.newInstance("host0",0);
      result=cli.run(new String[]{"application","-status",nodeId.toString(),"args"});
      verify(spyCli).printUsage(any(String.class),any(Options.class));
      Assert.assertEquals(createApplicationCLIHelpMessage(),sysOutStream.toString());
    }

    UtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * "application -kill" behaviour in three scenarios: an already-FINISHED
     * app (no kill issued), a RUNNING app (kill issued), and a non-existent
     * app (ApplicationNotFoundException is handled, not propagated).
     */
    @Test
    public void testKillApplication() throws Exception {
      ApplicationCLI cli=createAndGetAppCLI();
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      // Scenario 1: FINISHED app — killApplication must NOT be called.
      ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
      when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2);
      int result=cli.run(new String[]{"application","-kill",applicationId.toString()});
      assertEquals(0,result);
      verify(client,times(0)).killApplication(any(ApplicationId.class));
      verify(sysOut).println("Application " + applicationId + " has already finished ");
      // Scenario 2: RUNNING app — killApplication IS called.
      ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null);
      when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport);
      result=cli.run(new String[]{"application","-kill",applicationId.toString()});
      assertEquals(0,result);
      verify(client).killApplication(any(ApplicationId.class));
      verify(sysOut).println("Killing application application_1234_0005");
      // Scenario 3: unknown app — the CLI must swallow the not-found
      // exception, print the message, and return a non-zero exit code.
      doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).getApplicationReport(applicationId);
      cli=createAndGetAppCLI();
      try {
        int exitCode=cli.run(new String[]{"application","-kill",applicationId.toString()});
        verify(sysOut).println("Application with id '" + applicationId + "' doesn't exist in RM.");
        // NOTE(review): assertNotSame on autoboxed ints relies on the
        // Integer cache for small values — fragile but functional here.
        Assert.assertNotSame("should return non-zero exit code.",0,exitCode);
      } catch ( ApplicationNotFoundException appEx) {
        // NOTE(review): the literal newline inside this message was split
        // across lines in the source copy; preserved as \n.
        Assert.fail("application -kill should not throw" + "ApplicationNotFoundException. \n" + appEx);
      } catch ( Exception e) {
        Assert.fail("Unexpected exception: " + e);
      }
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * "container -help" and malformed "container" invocations (extra
     * trailing argument) must print the container CLI usage text.
     * NOTE(review): the return codes of the malformed invocations are
     * captured but never asserted — possibly intentional, verify.
     */
    @Test(timeout=10000)
    public void testContainersHelpCommand() throws Exception {
      ApplicationCLI cli=createAndGetAppCLI();
      ApplicationCLI spyCli=spy(cli);
      int result=spyCli.run(new String[]{"container","-help"});
      Assert.assertTrue(result == 0);
      verify(spyCli).printUsage(any(String.class),any(Options.class));
      Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
      sysOutStream.reset();
      // -list with a trailing extra argument: usage text is printed.
      ApplicationId applicationId=ApplicationId.newInstance(1234,5);
      ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6);
      result=cli.run(new String[]{"container","-list",appAttemptId.toString(),"args"});
      verify(spyCli).printUsage(any(String.class),any(Options.class));
      Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
      sysOutStream.reset();
      // -status with a trailing extra argument: usage text is printed.
      ContainerId containerId=ContainerId.newInstance(appAttemptId,7);
      result=cli.run(new String[]{"container","-status",containerId.toString(),"args"});
      verify(spyCli).printUsage(any(String.class),any(Options.class));
      Assert.assertEquals(createContainerCLIHelpMessage(),sysOutStream.toString());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * "node -status" for a node that is not in the cluster's reports must
     * still exit 0 and print a single "could not find" line.
     */
    @Test
    public void testAbsentNodeStatus() throws Exception {
      NodeId absentNodeId = NodeId.newInstance("Absenthost0", 0);
      when(client.getNodeReports()).thenReturn(getNodeReports(0, NodeState.RUNNING));
      NodeCLI nodeCli = new NodeCLI();
      nodeCli.setClient(client);
      nodeCli.setSysOutPrintStream(sysOut);
      nodeCli.setSysErrPrintStream(sysErr);
      int exitCode = nodeCli.run(new String[]{"-status", absentNodeId.toString()});
      assertEquals(0, exitCode);
      verify(client).getNodeReports();
      // Exactly one line is printed: the not-found message.
      verify(sysOut, times(1)).println(isA(String.class));
      verify(sysOut).println("Could not find the node report for node id : " + absentNodeId.toString());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Drives "yarn node -list" through every NodeState filter and checks the
     * exact table the CLI prints.  Each section mocks client.getNodeReports()
     * for one state set, runs the CLI, and compares against a hand-built
     * expected table.  The times(N) counters on sysOut.write accumulate
     * across sections, so the section order must not change.
     * NOTE(review): column padding in the expected strings may have been
     * collapsed in this copy of the source — kept byte-identical here.
     */
    @Test
    public void testListClusterNodes() throws Exception {
      // Fixture: seven node reports across six states (RUNNING has two).
      List nodeReports=new ArrayList();
      nodeReports.addAll(getNodeReports(1,NodeState.NEW));
      nodeReports.addAll(getNodeReports(2,NodeState.RUNNING));
      nodeReports.addAll(getNodeReports(1,NodeState.UNHEALTHY));
      nodeReports.addAll(getNodeReports(1,NodeState.DECOMMISSIONED));
      nodeReports.addAll(getNodeReports(1,NodeState.REBOOTED));
      nodeReports.addAll(getNodeReports(1,NodeState.LOST));
      NodeCLI cli=new NodeCLI();
      cli.setClient(client);
      cli.setSysOutPrintStream(sysOut);
      // --states NEW: one node.
      Set nodeStates=new HashSet();
      nodeStates.add(NodeState.NEW);
      NodeState[] states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      int result=cli.run(new String[]{"-list","--states","NEW"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      ByteArrayOutputStream baos=new ByteArrayOutputStream();
      PrintWriter pw=new PrintWriter(baos);
      pw.println("Total Nodes:1");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t NEW\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      String nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(1)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --states RUNNING: two nodes.
      nodeStates.clear();
      nodeStates.add(NodeState.RUNNING);
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--states","RUNNING"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:2");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t RUNNING\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host1:0\t RUNNING\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(2)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // Bare -list defaults to the RUNNING view (reuses the last stub and
      // the same expected table).
      result=cli.run(new String[]{"-list"});
      assertEquals(0,result);
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(3)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --states UNHEALTHY: one node.
      nodeStates.clear();
      nodeStates.add(NodeState.UNHEALTHY);
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--states","UNHEALTHY"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:1");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t UNHEALTHY\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --states DECOMMISSIONED: one node.
      nodeStates.clear();
      nodeStates.add(NodeState.DECOMMISSIONED);
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--states","DECOMMISSIONED"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:1");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(5)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --states REBOOTED: one node.
      nodeStates.clear();
      nodeStates.add(NodeState.REBOOTED);
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--states","REBOOTED"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:1");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t REBOOTED\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(6)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --states LOST: one node.
      nodeStates.clear();
      nodeStates.add(NodeState.LOST);
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--states","LOST"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:1");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t LOST\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(7)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --states NEW,RUNNING,LOST,REBOOTED: five nodes.
      nodeStates.clear();
      nodeStates.add(NodeState.NEW);
      nodeStates.add(NodeState.RUNNING);
      nodeStates.add(NodeState.LOST);
      nodeStates.add(NodeState.REBOOTED);
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--states","NEW,RUNNING,LOST,REBOOTED"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:5");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t NEW\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t RUNNING\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host1:0\t RUNNING\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t REBOOTED\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t LOST\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(8)).write(any(byte[].class),anyInt(),anyInt());
      sysOutStream.reset();
      // --all: every state, all seven nodes.
      nodeStates.clear();
      for ( NodeState s : NodeState.values()) {
        nodeStates.add(s);
      }
      states=nodeStates.toArray(new NodeState[0]);
      when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports,nodeStates));
      result=cli.run(new String[]{"-list","--all"});
      assertEquals(0,result);
      verify(client).getNodeReports(states);
      baos=new ByteArrayOutputStream();
      pw=new PrintWriter(baos);
      pw.println("Total Nodes:7");
      pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
      pw.println("Number-of-Running-Containers");
      pw.print(" host0:0\t NEW\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t RUNNING\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host1:0\t RUNNING\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t UNHEALTHY\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t REBOOTED\t host1:8888\t");
      pw.println(" 0");
      pw.print(" host0:0\t LOST\t host1:8888\t");
      pw.println(" 0");
      pw.close();
      nodesReportStr=baos.toString("UTF-8");
      Assert.assertEquals(nodesReportStr,sysOutStream.toString());
      verify(sysOut,times(9)).write(any(byte[].class),anyInt(),anyInt());
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that {@code application -status} propagates an
     * ApplicationNotFoundException raised by the client, with its message
     * intact, rather than swallowing or wrapping it.
     */
    @Test
    public void testGetApplicationReportException() throws Exception {
      ApplicationCLI cli = createAndGetAppCLI();
      ApplicationId appId = ApplicationId.newInstance(1234, 5);
      String notFoundMsg = "History file for application" + appId + " is not found";
      when(client.getApplicationReport(any(ApplicationId.class)))
          .thenThrow(new ApplicationNotFoundException(notFoundMsg));
      try {
        cli.run(new String[] { "application", "-status", appId.toString() });
        Assert.fail();
      } catch (Exception thrown) {
        // The CLI must surface the original exception type and message.
        Assert.assertTrue(thrown instanceof ApplicationNotFoundException);
        Assert.assertEquals(notFoundMsg, thrown.getMessage());
      }
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks that {@code applicationattempt -help} and malformed
     * applicationattempt commands (extra trailing args for -list/-status)
     * all print the applicationattempt usage text to sysOut.
     * NOTE(review): the 2nd and 3rd invocations run on {@code cli}, not
     * {@code spyCli}, so the subsequent verify(spyCli).printUsage(...) calls
     * are satisfied by the first invocation alone, and {@code result} is
     * assigned but never re-asserted after the first run -- confirm whether
     * these runs were meant to use the spy and assert a non-zero exit code.
     */
    @Test(timeout=10000) public void testAppAttemptsHelpCommand() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationCLI spyCli=spy(cli); int result=spyCli.run(new String[]{"applicationattempt","-help"}); Assert.assertTrue(result == 0); verify(spyCli).printUsage(any(String.class),any(Options.class)); Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString()); sysOutStream.reset(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); result=cli.run(new String[]{"applicationattempt","-list",applicationId.toString(),"args"}); verify(spyCli).printUsage(any(String.class),any(Options.class)); Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString()); sysOutStream.reset(); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(applicationId,6); result=cli.run(new String[]{"applicationattempt","-status",appAttemptId.toString(),"args"}); verify(spyCli).printUsage(any(String.class),any(Options.class)); Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),sysOutStream.toString()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies {@code applicationattempt -status}: the mocked client returns a
     * FINISHED attempt report and the test asserts the exit code, that the
     * client was queried with the exact attempt id, and the exact formatted
     * report text written to sysOut (built here with an identical PrintWriter).
     */
    @Test public void testGetApplicationAttemptReport() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1); ApplicationAttemptReport attemptReport=ApplicationAttemptReport.newInstance(attemptId,"host",124,"url","diagnostics",YarnApplicationAttemptState.FINISHED,ContainerId.newInstance(attemptId,1)); when(client.getApplicationAttemptReport(any(ApplicationAttemptId.class))).thenReturn(attemptReport); int result=cli.run(new String[]{"applicationattempt","-status",attemptId.toString()}); assertEquals(0,result); verify(client).getApplicationAttemptReport(attemptId); ByteArrayOutputStream baos=new ByteArrayOutputStream(); PrintWriter pw=new PrintWriter(baos); pw.println("Application Attempt Report : "); pw.println("\tApplicationAttempt-Id : appattempt_1234_0005_000001"); pw.println("\tState : FINISHED"); pw.println("\tAMContainer : container_1234_0005_01_000001"); pw.println("\tTracking-URL : url"); pw.println("\tRPC Port : 124"); pw.println("\tAM Host : host"); pw.println("\tDiagnostics : diagnostics"); pw.close(); String appReportStr=baos.toString("UTF-8"); Assert.assertEquals(appReportStr,sysOutStream.toString()); verify(sysOut,times(1)).println(isA(String.class)); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies {@code node -status host0:0}: the mocked client returns three
     * RUNNING node reports and the CLI must print the full report for the
     * requested node. The expected text is rebuilt here; the health-update
     * timestamp is formatted from epoch 0 with the same DateFormatUtils
     * pattern the CLI uses, so the comparison is locale/zone consistent.
     */
    @Test public void testNodeStatus() throws Exception { NodeId nodeId=NodeId.newInstance("host0",0); NodeCLI cli=new NodeCLI(); when(client.getNodeReports()).thenReturn(getNodeReports(3,NodeState.RUNNING)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); int result=cli.run(new String[]{"-status",nodeId.toString()}); assertEquals(0,result); verify(client).getNodeReports(); ByteArrayOutputStream baos=new ByteArrayOutputStream(); PrintWriter pw=new PrintWriter(baos); pw.println("Node Report : "); pw.println("\tNode-Id : host0:0"); pw.println("\tRack : rack1"); pw.println("\tNode-State : RUNNING"); pw.println("\tNode-Http-Address : host1:8888"); pw.println("\tLast-Health-Update : " + DateFormatUtils.format(new Date(0),"E dd/MMM/yy hh:mm:ss:SSzz")); pw.println("\tHealth-Report : "); pw.println("\tContainers : 0"); pw.println("\tMemory-Used : 0MB"); pw.println("\tMemory-Capacity : 0MB"); pw.println("\tCPU-Used : 0 vcores"); pw.println("\tCPU-Capacity : 0 vcores"); pw.close(); String nodeStatusStr=baos.toString("UTF-8"); verify(sysOut,times(1)).println(isA(String.class)); verify(sysOut).println(nodeStatusStr); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testGetApplications() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null); List applicationReports=new ArrayList(); applicationReports.add(newApplicationReport); ApplicationId applicationId2=ApplicationId.newInstance(1234,6); ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId2,ApplicationAttemptId.newInstance(applicationId2,2),"user2","queue2","appname2","host2",125,null,YarnApplicationState.FINISHED,"diagnostics2","url2",2,2,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.63789f,"NON-YARN",null); applicationReports.add(newApplicationReport2); ApplicationId applicationId3=ApplicationId.newInstance(1234,7); ApplicationReport newApplicationReport3=ApplicationReport.newInstance(applicationId3,ApplicationAttemptId.newInstance(applicationId3,3),"user3","queue3","appname3","host3",126,null,YarnApplicationState.RUNNING,"diagnostics3","url3",3,3,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.73789f,"MAPREDUCE",null); applicationReports.add(newApplicationReport3); ApplicationId applicationId4=ApplicationId.newInstance(1234,8); ApplicationReport newApplicationReport4=ApplicationReport.newInstance(applicationId4,ApplicationAttemptId.newInstance(applicationId4,4),"user4","queue4","appname4","host4",127,null,YarnApplicationState.FAILED,"diagnostics4","url4",4,4,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.83789f,"NON-MAPREDUCE",null); applicationReports.add(newApplicationReport4); ApplicationId applicationId5=ApplicationId.newInstance(1234,9); ApplicationReport 
newApplicationReport5=ApplicationReport.newInstance(applicationId5,ApplicationAttemptId.newInstance(applicationId5,5),"user5","queue5","appname5","host5",128,null,YarnApplicationState.ACCEPTED,"diagnostics5","url5",5,5,FinalApplicationStatus.KILLED,null,"N/A",0.93789f,"HIVE",null); applicationReports.add(newApplicationReport5); ApplicationId applicationId6=ApplicationId.newInstance(1234,10); ApplicationReport newApplicationReport6=ApplicationReport.newInstance(applicationId6,ApplicationAttemptId.newInstance(applicationId6,6),"user6","queue6","appname6","host6",129,null,YarnApplicationState.SUBMITTED,"diagnostics6","url6",6,6,FinalApplicationStatus.KILLED,null,"N/A",0.99789f,"PIG",null); applicationReports.add(newApplicationReport6); Set appType1=new HashSet(); EnumSet appState1=EnumSet.noneOf(YarnApplicationState.class); appState1.add(YarnApplicationState.RUNNING); appState1.add(YarnApplicationState.ACCEPTED); appState1.add(YarnApplicationState.SUBMITTED); when(client.getApplications(appType1,appState1)).thenReturn(getApplicationReports(applicationReports,appType1,appState1,false)); int result=cli.run(new String[]{"application","-list"}); assertEquals(0,result); verify(client).getApplications(appType1,appState1); ByteArrayOutputStream baos=new ByteArrayOutputStream(); PrintWriter pw=new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType1 + " and states: "+ appState1+ ")"+ ":"+ 4); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); pw.print(" application_1234_0005\t "); pw.print("appname\t YARN\t user\t "); pw.print("queue\t RUNNING\t "); pw.print("SUCCEEDED\t 53.79%"); pw.println("\t N/A"); pw.print(" application_1234_0007\t "); pw.print("appname3\t MAPREDUCE\t user3\t "); pw.print("queue3\t RUNNING\t "); pw.print("SUCCEEDED\t 73.79%"); pw.println("\t N/A"); pw.print(" 
application_1234_0009\t "); pw.print("appname5\t HIVE\t user5\t "); pw.print("queue5\t ACCEPTED\t "); pw.print("KILLED\t 93.79%"); pw.println("\t N/A"); pw.print(" application_1234_0010\t "); pw.print("appname6\t PIG\t user6\t "); pw.print("queue6\t SUBMITTED\t "); pw.print("KILLED\t 99.79%"); pw.println("\t N/A"); pw.close(); String appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(1)).write(any(byte[].class),anyInt(),anyInt()); sysOutStream.reset(); Set appType2=new HashSet(); appType2.add("YARN"); appType2.add("NON-YARN"); EnumSet appState2=EnumSet.noneOf(YarnApplicationState.class); appState2.add(YarnApplicationState.RUNNING); appState2.add(YarnApplicationState.ACCEPTED); appState2.add(YarnApplicationState.SUBMITTED); when(client.getApplications(appType2,appState2)).thenReturn(getApplicationReports(applicationReports,appType2,appState2,false)); result=cli.run(new String[]{"application","-list","-appTypes","YARN, ,, NON-YARN"," ,, ,,"}); assertEquals(0,result); verify(client).getApplications(appType2,appState2); baos=new ByteArrayOutputStream(); pw=new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType2 + " and states: "+ appState2+ ")"+ ":"+ 1); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); pw.print(" application_1234_0005\t "); pw.print("appname\t YARN\t user\t "); pw.print("queue\t RUNNING\t "); pw.print("SUCCEEDED\t 53.79%"); pw.println("\t N/A"); pw.close(); appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(2)).write(any(byte[].class),anyInt(),anyInt()); sysOutStream.reset(); Set appType3=new HashSet(); EnumSet appState3=EnumSet.noneOf(YarnApplicationState.class); appState3.add(YarnApplicationState.FINISHED); 
appState3.add(YarnApplicationState.FAILED); when(client.getApplications(appType3,appState3)).thenReturn(getApplicationReports(applicationReports,appType3,appState3,false)); result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , FAILED",",,FINISHED"}); assertEquals(0,result); verify(client).getApplications(appType3,appState3); baos=new ByteArrayOutputStream(); pw=new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType3 + " and states: "+ appState3+ ")"+ ":"+ 2); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); pw.print(" application_1234_0006\t "); pw.print("appname2\t NON-YARN\t user2\t "); pw.print("queue2\t FINISHED\t "); pw.print("SUCCEEDED\t 63.79%"); pw.println("\t N/A"); pw.print(" application_1234_0008\t "); pw.print("appname4\t NON-MAPREDUCE\t user4\t "); pw.print("queue4\t FAILED\t "); pw.print("SUCCEEDED\t 83.79%"); pw.println("\t N/A"); pw.close(); appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(3)).write(any(byte[].class),anyInt(),anyInt()); sysOutStream.reset(); Set appType4=new HashSet(); appType4.add("YARN"); appType4.add("NON-YARN"); EnumSet appState4=EnumSet.noneOf(YarnApplicationState.class); appState4.add(YarnApplicationState.FINISHED); appState4.add(YarnApplicationState.FAILED); when(client.getApplications(appType4,appState4)).thenReturn(getApplicationReports(applicationReports,appType4,appState4,false)); result=cli.run(new String[]{"application","-list","--appTypes","YARN,NON-YARN","--appStates","FINISHED ,, , FAILED"}); assertEquals(0,result); verify(client).getApplications(appType2,appState2); baos=new ByteArrayOutputStream(); pw=new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType4 + " and states: "+ appState4+ ")"+ ":"+ 1); 
pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); pw.print(" application_1234_0006\t "); pw.print("appname2\t NON-YARN\t user2\t "); pw.print("queue2\t FINISHED\t "); pw.print("SUCCEEDED\t 63.79%"); pw.println("\t N/A"); pw.close(); appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt()); sysOutStream.reset(); result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , INVALID"}); assertEquals(-1,result); baos=new ByteArrayOutputStream(); pw=new PrintWriter(baos); pw.println("The application state INVALID is invalid."); pw.print("The valid application state can be one of the following: "); StringBuilder sb=new StringBuilder(); sb.append("ALL,"); for ( YarnApplicationState state : YarnApplicationState.values()) { sb.append(state + ","); } String output=sb.toString(); pw.println(output.substring(0,output.length() - 1)); pw.close(); appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(4)).write(any(byte[].class),anyInt(),anyInt()); sysOutStream.reset(); Set appType5=new HashSet(); EnumSet appState5=EnumSet.noneOf(YarnApplicationState.class); appState5.add(YarnApplicationState.FINISHED); when(client.getApplications(appType5,appState5)).thenReturn(getApplicationReports(applicationReports,appType5,appState5,true)); result=cli.run(new String[]{"application","-list","--appStates","FINISHED ,, , ALL"}); assertEquals(0,result); verify(client).getApplications(appType5,appState5); baos=new ByteArrayOutputStream(); pw=new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType5 + " and states: "+ appState5+ ")"+ ":"+ 6); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t 
User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); pw.print(" application_1234_0005\t "); pw.print("appname\t YARN\t user\t "); pw.print("queue\t RUNNING\t "); pw.print("SUCCEEDED\t 53.79%"); pw.println("\t N/A"); pw.print(" application_1234_0006\t "); pw.print("appname2\t NON-YARN\t user2\t "); pw.print("queue2\t FINISHED\t "); pw.print("SUCCEEDED\t 63.79%"); pw.println("\t N/A"); pw.print(" application_1234_0007\t "); pw.print("appname3\t MAPREDUCE\t user3\t "); pw.print("queue3\t RUNNING\t "); pw.print("SUCCEEDED\t 73.79%"); pw.println("\t N/A"); pw.print(" application_1234_0008\t "); pw.print("appname4\t NON-MAPREDUCE\t user4\t "); pw.print("queue4\t FAILED\t "); pw.print("SUCCEEDED\t 83.79%"); pw.println("\t N/A"); pw.print(" application_1234_0009\t "); pw.print("appname5\t HIVE\t user5\t "); pw.print("queue5\t ACCEPTED\t "); pw.print("KILLED\t 93.79%"); pw.println("\t N/A"); pw.print(" application_1234_0010\t "); pw.print("appname6\t PIG\t user6\t "); pw.print("queue6\t SUBMITTED\t "); pw.print("KILLED\t 99.79%"); pw.println("\t N/A"); pw.close(); appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(5)).write(any(byte[].class),anyInt(),anyInt()); sysOutStream.reset(); Set appType6=new HashSet(); appType6.add("YARN"); appType6.add("NON-YARN"); EnumSet appState6=EnumSet.noneOf(YarnApplicationState.class); appState6.add(YarnApplicationState.FINISHED); when(client.getApplications(appType6,appState6)).thenReturn(getApplicationReports(applicationReports,appType6,appState6,false)); result=cli.run(new String[]{"application","-list","-appTypes","YARN, ,, NON-YARN","--appStates","finished"}); assertEquals(0,result); verify(client).getApplications(appType6,appState6); baos=new ByteArrayOutputStream(); pw=new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType6 + " and states: "+ appState6+ ")"+ ":"+ 1); pw.print(" 
Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); pw.print(" application_1234_0006\t "); pw.print("appname2\t NON-YARN\t user2\t "); pw.print("queue2\t FINISHED\t "); pw.print("SUCCEEDED\t 63.79%"); pw.println("\t N/A"); pw.close(); appsReportStr=baos.toString("UTF-8"); Assert.assertEquals(appsReportStr,sysOutStream.toString()); verify(sysOut,times(6)).write(any(byte[].class),anyInt(),anyInt()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies {@code application -status}: the mocked client returns a
     * FINISHED report and the test asserts the exit code, that the client was
     * queried with the exact application id, and the exact formatted report
     * text written to sysOut (rebuilt here with an identical PrintWriter).
     */
    @Test public void testGetApplicationReport() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null); when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport); int result=cli.run(new String[]{"application","-status",applicationId.toString()}); assertEquals(0,result); verify(client).getApplicationReport(applicationId); ByteArrayOutputStream baos=new ByteArrayOutputStream(); PrintWriter pw=new PrintWriter(baos); pw.println("Application Report : "); pw.println("\tApplication-Id : application_1234_0005"); pw.println("\tApplication-Name : appname"); pw.println("\tApplication-Type : YARN"); pw.println("\tUser : user"); pw.println("\tQueue : queue"); pw.println("\tStart-Time : 0"); pw.println("\tFinish-Time : 0"); pw.println("\tProgress : 53.79%"); pw.println("\tState : FINISHED"); pw.println("\tFinal-State : SUCCEEDED"); pw.println("\tTracking-URL : N/A"); pw.println("\tRPC Port : 124"); pw.println("\tAM Host : host"); pw.println("\tDiagnostics : diagnostics"); pw.close(); String appReportStr=baos.toString("UTF-8"); Assert.assertEquals(appReportStr,sysOutStream.toString()); verify(sysOut,times(1)).println(isA(String.class)); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies {@code container -status}: the mocked client returns a
     * COMPLETE container report and the test asserts the exit code, that the
     * client was queried with the exact container id, and the exact formatted
     * report text written to sysOut.
     */
    @Test public void testGetContainerReport() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(applicationId,1); ContainerId containerId=ContainerId.newInstance(attemptId,1); ContainerReport container=ContainerReport.newInstance(containerId,null,NodeId.newInstance("host",1234),Priority.UNDEFINED,1234,5678,"diagnosticInfo","logURL",0,ContainerState.COMPLETE); when(client.getContainerReport(any(ContainerId.class))).thenReturn(container); int result=cli.run(new String[]{"container","-status",containerId.toString()}); assertEquals(0,result); verify(client).getContainerReport(containerId); ByteArrayOutputStream baos=new ByteArrayOutputStream(); PrintWriter pw=new PrintWriter(baos); pw.println("Container Report : "); pw.println("\tContainer-Id : container_1234_0005_01_000001"); pw.println("\tStart-Time : 1234"); pw.println("\tFinish-Time : 5678"); pw.println("\tState : COMPLETE"); pw.println("\tLOG-URL : logURL"); pw.println("\tHost : host:1234"); pw.println("\tDiagnostics : diagnosticInfo"); pw.close(); String appReportStr=baos.toString("UTF-8"); Assert.assertEquals(appReportStr,sysOutStream.toString()); verify(sysOut,times(1)).println(isA(String.class)); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Covers three paths of {@code application -movetoqueue}:
     * 1) a FINISHED application is not moved (moveApplicationAcrossQueues is
     *    never invoked, a notice is printed, exit code is still 0);
     * 2) a RUNNING application is moved and progress messages are printed;
     * 3) an ApplicationNotFoundException from the client propagates out of
     *    run() with its original message.
     */
    @Test public void testMoveApplicationAcrossQueues() throws Exception { ApplicationCLI cli=createAndGetAppCLI(); ApplicationId applicationId=ApplicationId.newInstance(1234,5); ApplicationReport newApplicationReport2=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.FINISHED,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null); when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport2); int result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"}); assertEquals(0,result); verify(client,times(0)).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class)); verify(sysOut).println("Application " + applicationId + " has already finished "); ApplicationReport newApplicationReport=ApplicationReport.newInstance(applicationId,ApplicationAttemptId.newInstance(applicationId,1),"user","queue","appname","host",124,null,YarnApplicationState.RUNNING,"diagnostics","url",0,0,FinalApplicationStatus.SUCCEEDED,null,"N/A",0.53789f,"YARN",null); when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(newApplicationReport); result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"}); assertEquals(0,result); verify(client).moveApplicationAcrossQueues(any(ApplicationId.class),any(String.class)); verify(sysOut).println("Moving application application_1234_0005 to queue targetqueue"); verify(sysOut).println("Successfully completed move."); doThrow(new ApplicationNotFoundException("Application with id '" + applicationId + "' doesn't exist in RM.")).when(client).moveApplicationAcrossQueues(applicationId,"targetqueue"); cli=createAndGetAppCLI(); try { result=cli.run(new String[]{"application","-movetoqueue",applicationId.toString(),"-queue","targetqueue"}); Assert.fail(); } catch ( Exception ex) 
{ Assert.assertTrue(ex instanceof ApplicationNotFoundException); Assert.assertEquals("Application with id '" + applicationId + "' doesn't exist in RM.",ex.getMessage()); } }

    Class: org.apache.hadoop.yarn.conf.TestHAUtil

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises HAUtil.verifyAndSetConfiguration() across one valid and five
     * invalid HA configurations (single RM id, missing RM_HA_ID, invalid
     * RM_HA_ID, missing per-RM address, RM_HA_ID not in RM_HA_IDS) and
     * asserts the exact YarnRuntimeException messages.
     * NOTE(review): only the Configuration#set() case calls fail() after
     * verifyAndSetConfiguration(); the other expected-to-throw cases would
     * pass silently if no YarnRuntimeException were raised -- consider adding
     * fail(...) after each call that is expected to throw.
     */
    @Test public void testVerifyAndSetConfiguration() throws Exception { try { HAUtil.verifyAndSetConfiguration(conf); } catch ( YarnRuntimeException e) { fail("Should not throw any exceptions."); } assertEquals("Should be saved as Trimmed collection",StringUtils.getStringCollection(RM_NODE_IDS),HAUtil.getRMHAIds(conf)); assertEquals("Should be saved as Trimmed string",RM1_NODE_ID,HAUtil.getRMHAId(conf)); for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { assertEquals("RPC address not set for " + confKey,RM1_ADDRESS,conf.get(confKey)); } conf.clear(); conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID); try { HAUtil.verifyAndSetConfiguration(conf); } catch ( YarnRuntimeException e) { assertEquals("YarnRuntimeException by verifyAndSetRMHAIds()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,conf.get(YarnConfiguration.RM_HA_IDS) + "\nHA mode requires atleast two RMs"),e.getMessage()); } conf.clear(); conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID); for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS); conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS); } try { HAUtil.verifyAndSetConfiguration(conf); } catch ( YarnRuntimeException e) { assertEquals("YarnRuntimeException by getRMId()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID),e.getMessage()); } conf.clear(); conf.set(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID); conf.set(YarnConfiguration.RM_HA_IDS,RM_INVALID_NODE_ID + "," + RM1_NODE_ID); for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { conf.set(confKey + RM_INVALID_NODE_ID,RM_INVALID_NODE_ID); } try { HAUtil.verifyAndSetConfiguration(conf); } catch ( YarnRuntimeException e) { assertEquals("YarnRuntimeException by addSuffix()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + 
HAUtil.getInvalidValueMessage(YarnConfiguration.RM_HA_ID,RM_INVALID_NODE_ID),e.getMessage()); } conf.clear(); conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID); conf.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM2_NODE_ID); try { HAUtil.verifyAndSetConfiguration(conf); fail("Should throw YarnRuntimeException. by Configuration#set()"); } catch ( YarnRuntimeException e) { String confKey=HAUtil.addSuffix(YarnConfiguration.RM_ADDRESS,RM1_NODE_ID); assertEquals("YarnRuntimeException by Configuration#set()",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getNeedToSetValueMessage(HAUtil.addSuffix(YarnConfiguration.RM_HOSTNAME,RM1_NODE_ID) + " or " + confKey),e.getMessage()); } conf.clear(); conf.set(YarnConfiguration.RM_HA_IDS,RM2_NODE_ID + "," + RM3_NODE_ID); conf.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID_UNTRIMMED); for ( String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) { conf.set(HAUtil.addSuffix(confKey,RM1_NODE_ID),RM1_ADDRESS_UNTRIMMED); conf.set(HAUtil.addSuffix(confKey,RM2_NODE_ID),RM2_ADDRESS); conf.set(HAUtil.addSuffix(confKey,RM3_NODE_ID),RM3_ADDRESS); } try { HAUtil.verifyAndSetConfiguration(conf); } catch ( YarnRuntimeException e) { assertEquals("YarnRuntimeException by getRMId()'s validation",HAUtil.BAD_CONFIG_MESSAGE_PREFIX + HAUtil.getRMHAIdNeedToBeIncludedMessage("[rm2, rm3]",RM1_NODE_ID),e.getMessage()); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * HAUtil.getRMHAIds() must return the configured RM ids, preserving
     * their declaration order.
     */
    @Test
    public void testGetRMServiceId() throws Exception {
      conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID);
      Collection<String> haIds = HAUtil.getRMHAIds(conf);
      assertEquals(2, haIds.size());
      String[] idArray = haIds.toArray(new String[0]);
      // Order matters: ids come back exactly as listed in RM_HA_IDS.
      assertEquals(RM1_NODE_ID, idArray[0]);
      assertEquals(RM2_NODE_ID, idArray[1]);
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * HAUtil.getRMHAId() must echo the configured RM_HA_ID, and return null
     * when the key is absent.
     */
    @Test
    public void testGetRMId() throws Exception {
      conf.set(YarnConfiguration.RM_HA_ID, RM1_NODE_ID);
      String honorMsg = "Does not honor " + YarnConfiguration.RM_HA_ID;
      assertEquals(honorMsg, RM1_NODE_ID, HAUtil.getRMHAId(conf));
      // With an empty configuration there is no id to resolve.
      conf.clear();
      String unsetMsg =
          "Return null when " + YarnConfiguration.RM_HA_ID + " is not set";
      assertNull(unsetMsg, HAUtil.getRMHAId(conf));
    }

    Class: org.apache.hadoop.yarn.conf.TestYarnConfiguration

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Enabling RM HA must not change how an explicitly configured NM address
     * (including its port) is resolved via getSocketAddr().
     */
    @Test
    public void testGetSocketAddressForNMWithHA() {
      YarnConfiguration conf = new YarnConfiguration();
      conf.set(YarnConfiguration.NM_ADDRESS, "0.0.0.0:1234");
      // Turn HA on with a concrete RM id before resolving the NM address.
      conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
      conf.set(YarnConfiguration.RM_HA_ID, "rm1");
      assertTrue(HAUtil.isHAEnabled(conf));
      InetSocketAddress nmAddress = conf.getSocketAddr(
          YarnConfiguration.NM_ADDRESS,
          YarnConfiguration.DEFAULT_NM_ADDRESS,
          YarnConfiguration.DEFAULT_NM_PORT);
      assertEquals(1234, nmAddress.getPort());
    }

    APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * When RM_WEBAPP_ADDRESS is set explicitly, the web-app URL must use its
     * host and port, not the (different) RM_ADDRESS host.
     */
    @Test
    public void testRMWebUrlSpecified() throws Exception {
      YarnConfiguration conf = new YarnConfiguration();
      // RM address and web-app address deliberately differ.
      conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "fortesting:24543");
      conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
      String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
      String[] parts = rmWebUrl.split(":");
      // Fix: message typo ("incrrect"); parseInt avoids boxing.
      Assert.assertEquals("RM Web URL Port is incorrect", 24543,
          Integer.parseInt(parts[parts.length - 1]));
      // Fix: assertNotSame compared object identity, which always passes for
      // a freshly built String; compare by value instead.
      Assert.assertFalse(
          "RM Web Url not resolved correctly. Should not be rmtesting",
          "http://rmtesting:24543".equals(rmWebUrl));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies getSocketAddr() resolution of the RM resource-tracker address
     * under every combination of RM_BIND_HOST and
     * RM_RESOURCE_TRACKER_ADDRESS: defaults, host-only override, host:port
     * override, bind-host override, and bind-host combined with an address
     * (with and without an explicit port).
     */
    @Test
    public void testGetSocketAddr() throws Exception {
      YarnConfiguration conf;
      InetSocketAddress resourceTrackerAddress;

      // No overrides: default host and port.
      conf = new YarnConfiguration();
      resourceTrackerAddress = getResourceTrackerAddress(conf);
      assertEquals(
          new InetSocketAddress(
              YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
              YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
          resourceTrackerAddress);

      // Address without a port: host overridden, default port kept.
      conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.1");
      resourceTrackerAddress = getResourceTrackerAddress(conf);
      assertEquals(
          new InetSocketAddress("10.0.0.1",
              YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
          resourceTrackerAddress);

      // Address with a port: both host and port overridden.
      conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5001");
      resourceTrackerAddress = getResourceTrackerAddress(conf);
      assertEquals(new InetSocketAddress("10.0.0.2", 5001),
          resourceTrackerAddress);

      // Bind host alone: supplies the host, default port kept.
      conf = new YarnConfiguration();
      conf.set(YarnConfiguration.RM_BIND_HOST, "10.0.0.3");
      resourceTrackerAddress = getResourceTrackerAddress(conf);
      assertEquals(
          new InetSocketAddress("10.0.0.3",
              YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
          resourceTrackerAddress);

      // Bind host wins over a port-less tracker address.
      conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
      conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2");
      resourceTrackerAddress = getResourceTrackerAddress(conf);
      assertEquals(
          new InetSocketAddress("0.0.0.0",
              YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
          resourceTrackerAddress);

      // Bind host wins for the host; explicit port of the address is kept.
      conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
      conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5003");
      resourceTrackerAddress = getResourceTrackerAddress(conf);
      assertEquals(new InetSocketAddress("0.0.0.0", 5003),
          resourceTrackerAddress);
    }

    /** Resolves the resource-tracker address from conf, honoring RM_BIND_HOST. */
    private static InetSocketAddress getResourceTrackerAddress(
        YarnConfiguration conf) {
      return conf.getSocketAddr(
          YarnConfiguration.RM_BIND_HOST,
          YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
          YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
          YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
    }

    Class: org.apache.hadoop.yarn.logaggregation.TestAggregatedLogFormat

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Round-trips an aggregated container log: writes an 80000-character
     * stdout file through LogWriter, checks the aggregated file's permissions
     * (0640), then reads it back via LogReader.readAcontainerLogs and asserts
     * the LogType/LogLength headers, the full filler content, and the exact
     * total length of the rendered text.
     */
    @Test public void testReadAcontainerLogs1() throws Exception { Configuration conf=new Configuration(); File workDir=new File(testWorkDir,"testReadAcontainerLogs1"); Path remoteAppLogFile=new Path(workDir.getAbsolutePath(),"aggregatedLogFile"); Path srcFileRoot=new Path(workDir.getAbsolutePath(),"srcFiles"); ContainerId testContainerId=TestContainerId.newContainerId(1,1,1,1); Path t=new Path(srcFileRoot,testContainerId.getApplicationAttemptId().getApplicationId().toString()); Path srcFilePath=new Path(t,testContainerId.toString()); int numChars=80000; writeSrcFile(srcFilePath,"stdout",numChars); UserGroupInformation ugi=UserGroupInformation.getCurrentUser(); LogWriter logWriter=new LogWriter(conf,remoteAppLogFile,ugi); LogKey logKey=new LogKey(testContainerId); LogValue logValue=new LogValue(Collections.singletonList(srcFileRoot.toString()),testContainerId,ugi.getShortUserName()); logWriter.append(logKey,logValue); logWriter.close(); FileStatus fsStatus=fs.getFileStatus(remoteAppLogFile); Assert.assertEquals("permissions on log aggregation file are wrong",FsPermission.createImmutable((short)0640),fsStatus.getPermission()); LogReader logReader=new LogReader(conf,remoteAppLogFile); LogKey rLogKey=new LogKey(); DataInputStream dis=logReader.next(rLogKey); Writer writer=new StringWriter(); LogReader.readAcontainerLogs(dis,writer); String s=writer.toString(); int expectedLength="\n\nLogType:stdout".length() + ("\nLogLength:" + numChars).length() + "\nLog Contents:\n".length()+ numChars; Assert.assertTrue("LogType not matched",s.contains("LogType:stdout")); Assert.assertTrue("LogLength not matched",s.contains("LogLength:" + numChars)); Assert.assertTrue("Log Contents not matched",s.contains("Log Contents")); StringBuilder sb=new StringBuilder(); for (int i=0; i < numChars; i++) { sb.append(filler); } String expectedContent=sb.toString(); Assert.assertTrue("Log content incorrect",s.contains(expectedContent)); Assert.assertEquals(expectedLength,s.length()); }

    Class: org.apache.hadoop.yarn.server.TestDiskFailures

    InternalCallVerifier EqualityVerifier 
    /**
     * Make a local and log directory inaccessible during initialization
     * and verify those bad directories are recognized and removed from
     * the list of available local and log directories.
     * @throws IOException
     */
    @Test
    public void testDirFailuresOnStartup() throws IOException {
      Configuration conf = new YarnConfiguration();
      String localDir1 = new File(testDir, "localDir1").getPath();
      String localDir2 = new File(testDir, "localDir2").getPath();
      String logDir1 = new File(testDir, "logDir1").getPath();
      String logDir2 = new File(testDir, "logDir2").getPath();
      conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
      conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
      // One local dir and one log dir are made unusable before service init.
      prepareDirToFail(localDir1);
      prepareDirToFail(logDir2);
      LocalDirsHandlerService dirSvc = new LocalDirsHandlerService();
      dirSvc.init(conf);
      // Typed lists instead of the original raw List declarations.
      List<String> localDirs = dirSvc.getLocalDirs();
      Assert.assertEquals(1, localDirs.size());
      Assert.assertEquals(new Path(localDir2).toString(), localDirs.get(0));
      List<String> logDirs = dirSvc.getLogDirs();
      Assert.assertEquals(1, logDirs.size());
      Assert.assertEquals(new Path(logDir1).toString(), logDirs.get(0));
    }

    Class: org.apache.hadoop.yarn.server.api.protocolrecords.TestProtocolRecords

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Round-trips a RegisterNodeManagerRequest through its protobuf form and verifies
     * every field (container report, http port, NM version, node id, resource, and the
     * running-application list) survives serialization.
     */
    @Test
    public void testRegisterNodeManagerRequest() {
      ApplicationId appId = ApplicationId.newInstance(123456789, 1);
      ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
      ContainerId containerId = ContainerId.newInstance(attemptId, 1);
      NMContainerStatus containerReport = NMContainerStatus.newInstance(containerId,
          ContainerState.RUNNING, Resource.newInstance(1024, 1), "diagnostics", 0,
          Priority.newInstance(10), 1234);
      // Typed list instead of the original raw List.
      List<NMContainerStatus> reports = Arrays.asList(containerReport);
      RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(
          NodeId.newInstance("1.1.1.1", 1000), 8080, Resource.newInstance(1024, 1),
          "NM-version-id", reports, Arrays.asList(appId));
      RegisterNodeManagerRequest requestProto = new RegisterNodeManagerRequestPBImpl(
          ((RegisterNodeManagerRequestPBImpl) request).getProto());
      Assert.assertEquals(containerReport, requestProto.getNMContainerStatuses().get(0));
      Assert.assertEquals(8080, requestProto.getHttpPort());
      Assert.assertEquals("NM-version-id", requestProto.getNMVersion());
      Assert.assertEquals(NodeId.newInstance("1.1.1.1", 1000), requestProto.getNodeId());
      Assert.assertEquals(Resource.newInstance(1024, 1), requestProto.getResource());
      Assert.assertEquals(1, requestProto.getRunningApplications().size());
      Assert.assertEquals(appId, requestProto.getRunningApplications().get(0));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Round-trips an NMContainerStatus through its protobuf representation and checks
     * that every field is preserved.
     */
    @Test
    public void testNMContainerStatus() {
      ApplicationId appId = ApplicationId.newInstance(123456789, 1);
      ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
      ContainerId containerId = ContainerId.newInstance(attemptId, 1);
      Resource resource = Resource.newInstance(1000, 200);
      NMContainerStatus original = NMContainerStatus.newInstance(containerId,
          ContainerState.COMPLETE, resource, "diagnostics",
          ContainerExitStatus.ABORTED, Priority.newInstance(10), 1234);
      NMContainerStatus restored =
          new NMContainerStatusPBImpl(((NMContainerStatusPBImpl) original).getProto());
      Assert.assertEquals("diagnostics", restored.getDiagnostics());
      Assert.assertEquals(resource, restored.getAllocatedResource());
      Assert.assertEquals(ContainerExitStatus.ABORTED, restored.getContainerExitStatus());
      Assert.assertEquals(ContainerState.COMPLETE, restored.getContainerState());
      Assert.assertEquals(containerId, restored.getContainerId());
      Assert.assertEquals(Priority.newInstance(10), restored.getPriority());
      Assert.assertEquals(1234, restored.getCreationTime());
    }

    Class: org.apache.hadoop.yarn.server.api.protocolrecords.TestRegisterNodeManagerRequest

    InternalCallVerifier EqualityVerifier 
    /**
     * Round-trips a fully populated RegisterNodeManagerRequest through protobuf and
     * verifies container statuses and the running-application list are preserved.
     */
    @Test
    public void testRegisterNodeManagerRequest() {
      RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(
          NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0), "version",
          Arrays.asList(NMContainerStatus.newInstance(
              ContainerId.newInstance(
                  ApplicationAttemptId.newInstance(ApplicationId.newInstance(1234L, 1), 1), 1),
              ContainerState.RUNNING, Resource.newInstance(1024, 1), "good", -1,
              Priority.newInstance(0), 1234)),
          Arrays.asList(ApplicationId.newInstance(1234L, 1),
              ApplicationId.newInstance(1234L, 2)));
      RegisterNodeManagerRequest request1 = new RegisterNodeManagerRequestPBImpl(
          ((RegisterNodeManagerRequestPBImpl) request).getProto());
      // Fixed argument order: the source request supplies the EXPECTED values (first
      // argument), the round-tripped request1 is the ACTUAL — the original had them
      // swapped, which produces misleading failure messages.
      Assert.assertEquals(request.getNMContainerStatuses().size(),
          request1.getNMContainerStatuses().size());
      Assert.assertEquals(request.getNMContainerStatuses().get(0).getContainerId(),
          request1.getNMContainerStatuses().get(0).getContainerId());
      Assert.assertEquals(request.getRunningApplications().size(),
          request1.getRunningApplications().size());
      Assert.assertEquals(request.getRunningApplications().get(0),
          request1.getRunningApplications().get(0));
      Assert.assertEquals(request.getRunningApplications().get(1),
          request1.getRunningApplications().get(1));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A request built with null status/application lists must round-trip through
     * protobuf into empty (not null) collections.
     */
    @Test
    public void testRegisterNodeManagerRequestWithNullArrays() {
      RegisterNodeManagerRequest original = RegisterNodeManagerRequest.newInstance(
          NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
          "version", null, null);
      RegisterNodeManagerRequest restored = new RegisterNodeManagerRequestPBImpl(
          ((RegisterNodeManagerRequestPBImpl) original).getProto());
      Assert.assertEquals(0, restored.getNMContainerStatuses().size());
      Assert.assertEquals(0, restored.getRunningApplications().size());
    }

    Class: org.apache.hadoop.yarn.server.api.protocolrecords.TestRegisterNodeManagerResponse

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Builds a RegisterNodeManagerResponse carrying a container-token master key (id
     * 54321) and an NM-token master key (id 12345) sharing the same byte payload, then
     * verifies both keys and the NORMAL node action survive serDe() serialization —
     * checked once on the original response and again on each deserialized copy.
     */
    @Test public void testRoundTrip() throws Exception { RegisterNodeManagerResponse resp=recordFactory.newRecordInstance(RegisterNodeManagerResponse.class); byte b[]={0,1,2,3,4,5}; MasterKey containerTokenMK=recordFactory.newRecordInstance(MasterKey.class); containerTokenMK.setKeyId(54321); containerTokenMK.setBytes(ByteBuffer.wrap(b)); resp.setContainerTokenMasterKey(containerTokenMK); MasterKey nmTokenMK=recordFactory.newRecordInstance(MasterKey.class); nmTokenMK.setKeyId(12345); nmTokenMK.setBytes(ByteBuffer.wrap(b)); resp.setNMTokenMasterKey(nmTokenMK); resp.setNodeAction(NodeAction.NORMAL); assertEquals(NodeAction.NORMAL,resp.getNodeAction()); assertNotNull(resp.getContainerTokenMasterKey()); assertEquals(54321,resp.getContainerTokenMasterKey().getKeyId()); assertArrayEquals(b,resp.getContainerTokenMasterKey().getBytes().array()); RegisterNodeManagerResponse respCopy=serDe(resp); assertEquals(NodeAction.NORMAL,respCopy.getNodeAction()); assertNotNull(respCopy.getContainerTokenMasterKey()); assertEquals(54321,respCopy.getContainerTokenMasterKey().getKeyId()); assertArrayEquals(b,respCopy.getContainerTokenMasterKey().getBytes().array()); assertNotNull(resp.getNMTokenMasterKey()); assertEquals(12345,resp.getNMTokenMasterKey().getKeyId()); assertArrayEquals(b,resp.getNMTokenMasterKey().getBytes().array()); respCopy=serDe(resp); assertEquals(NodeAction.NORMAL,respCopy.getNodeAction()); assertNotNull(respCopy.getNMTokenMasterKey()); assertEquals(12345,respCopy.getNMTokenMasterKey().getKeyId()); assertArrayEquals(b,respCopy.getNMTokenMasterKey().getBytes().array()); }

    Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryClientService

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Records start/finish history for two applications and verifies that the client
     * service lists both, in id order.
     */
    @Test
    public void testApplications() throws IOException, YarnException {
      // Dropped the redundant "null then assign" initialization of appId.
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      writeApplicationStartData(appId);
      writeApplicationFinishData(appId);
      ApplicationId appId1 = ApplicationId.newInstance(0, 2);
      writeApplicationStartData(appId1);
      writeApplicationFinishData(appId1);
      GetApplicationsRequest request = GetApplicationsRequest.newInstance();
      GetApplicationsResponse response =
          historyServer.getClientService().getClientHandler().getApplications(request);
      // Typed list instead of the original raw List.
      List<ApplicationReport> appReport = response.getApplicationList();
      Assert.assertNotNull(appReport);
      Assert.assertEquals(appId, appReport.get(0).getApplicationId());
      Assert.assertEquals(appId1, appReport.get(1).getApplicationId());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Records start/finish history for two containers of one attempt and verifies the
     * client service returns both (note: most recent first, so containerId1 precedes
     * containerId in the returned list).
     */
    @Test
    public void testContainers() throws IOException, YarnException {
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      writeApplicationStartData(appId);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
      ContainerId containerId = ContainerId.newInstance(appAttemptId, 1);
      ContainerId containerId1 = ContainerId.newInstance(appAttemptId, 2);
      writeContainerStartData(containerId);
      writeContainerFinishData(containerId);
      writeContainerStartData(containerId1);
      writeContainerFinishData(containerId1);
      writeApplicationFinishData(appId);
      GetContainersRequest request = GetContainersRequest.newInstance(appAttemptId);
      GetContainersResponse response =
          historyServer.getClientService().getClientHandler().getContainers(request);
      // Typed list instead of the original raw List.
      List<ContainerReport> containers = response.getContainerList();
      Assert.assertNotNull(containers);
      Assert.assertEquals(containerId, containers.get(1).getContainerId());
      Assert.assertEquals(containerId1, containers.get(0).getContainerId());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Writes start/finish history for a single application attempt and verifies the
     * attempt report can be fetched back by id through the client service.
     */
    @Test
    public void testApplicationAttemptReport() throws IOException, YarnException {
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
      writeApplicationAttemptStartData(attemptId);
      writeApplicationAttemptFinishData(attemptId);
      GetApplicationAttemptReportRequest request =
          GetApplicationAttemptReportRequest.newInstance(attemptId);
      GetApplicationAttemptReportResponse response = historyServer.getClientService()
          .getClientHandler().getApplicationAttemptReport(request);
      ApplicationAttemptReport report = response.getApplicationAttemptReport();
      Assert.assertNotNull(report);
      Assert.assertEquals("appattempt_0_0001_000001",
          report.getApplicationAttemptId().toString());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Records history for two attempts of one application and verifies the client
     * service lists both, in attempt-id order.
     */
    @Test
    public void testApplicationAttempts() throws IOException, YarnException {
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
      ApplicationAttemptId appAttemptId1 = ApplicationAttemptId.newInstance(appId, 2);
      writeApplicationAttemptStartData(appAttemptId);
      writeApplicationAttemptFinishData(appAttemptId);
      writeApplicationAttemptStartData(appAttemptId1);
      writeApplicationAttemptFinishData(appAttemptId1);
      GetApplicationAttemptsRequest request = GetApplicationAttemptsRequest.newInstance(appId);
      GetApplicationAttemptsResponse response =
          historyServer.getClientService().getClientHandler().getApplicationAttempts(request);
      // Typed list instead of the original raw List.
      List<ApplicationAttemptReport> attemptReports = response.getApplicationAttemptList();
      Assert.assertNotNull(attemptReports);
      Assert.assertEquals(appAttemptId, attemptReports.get(0).getApplicationAttemptId());
      Assert.assertEquals(appAttemptId1, attemptReports.get(1).getApplicationAttemptId());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Records start/finish history for one application and verifies the report returned
     * by the client service carries the expected id, type, and queue.
     */
    @Test
    public void testApplicationReport() throws IOException, YarnException {
      // Dropped the redundant "null then assign" initialization of appId.
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      writeApplicationStartData(appId);
      writeApplicationFinishData(appId);
      GetApplicationReportRequest request = GetApplicationReportRequest.newInstance(appId);
      GetApplicationReportResponse response =
          historyServer.getClientService().getClientHandler().getApplicationReport(request);
      ApplicationReport appReport = response.getApplicationReport();
      Assert.assertNotNull(appReport);
      Assert.assertEquals("application_0_0001", appReport.getApplicationId().toString());
      // getApplicationType()/getQueue() already return String — dropped the redundant
      // .toString() calls the original applied to them.
      Assert.assertEquals("test type", appReport.getApplicationType());
      Assert.assertEquals("test queue", appReport.getQueue());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Writes start/finish history for one container and verifies the container report
     * (id and log URL) returned by the client service.
     */
    @Test
    public void testContainerReport() throws IOException, YarnException {
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      writeApplicationStartData(appId);
      ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
      ContainerId containerId = ContainerId.newInstance(attemptId, 1);
      writeContainerStartData(containerId);
      writeContainerFinishData(containerId);
      writeApplicationFinishData(appId);
      GetContainerReportRequest request = GetContainerReportRequest.newInstance(containerId);
      GetContainerReportResponse response =
          historyServer.getClientService().getClientHandler().getContainerReport(request);
      ContainerReport report = response.getContainerReport();
      Assert.assertNotNull(report);
      Assert.assertEquals(containerId, report.getContainerId());
      Assert.assertEquals(expectedLogUrl, report.getLogUrl());
    }

    Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryManagerImpl

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Records application and attempt history, then verifies the report assembled by
     * ApplicationHistoryManagerImpl: id, current attempt id, host (the attempt id
     * string, per the test writer), type, and queue.
     */
    @Test
    public void testApplicationReport() throws IOException, YarnException {
      // Dropped the redundant "null then assign" initialization of appId.
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      writeApplicationStartData(appId);
      writeApplicationFinishData(appId);
      ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
      writeApplicationAttemptStartData(appAttemptId);
      writeApplicationAttemptFinishData(appAttemptId);
      ApplicationReport appReport = applicationHistoryManagerImpl.getApplication(appId);
      Assert.assertNotNull(appReport);
      Assert.assertEquals(appId, appReport.getApplicationId());
      Assert.assertEquals(appAttemptId, appReport.getCurrentApplicationAttemptId());
      Assert.assertEquals(appAttemptId.toString(), appReport.getHost());
      // getApplicationType()/getQueue() already return String — dropped the redundant
      // .toString() calls the original applied to them.
      Assert.assertEquals("test type", appReport.getApplicationType());
      Assert.assertEquals("test queue", appReport.getQueue());
    }

    Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestApplicationHistoryServer

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Drives the history server through INITED -> STARTED -> STOPPED and checks that
     * the embedded client service follows the same lifecycle.
     */
    @Test(timeout=50000)
    public void testStartStopServer() throws Exception {
      historyServer = new ApplicationHistoryServer();
      Configuration conf = new YarnConfiguration();
      historyServer.init(conf);
      assertEquals(STATE.INITED, historyServer.getServiceState());
      assertEquals(4, historyServer.getServices().size());
      ApplicationHistoryClientService clientService = historyServer.getClientService();
      assertNotNull(historyServer.getClientService());
      assertEquals(STATE.INITED, clientService.getServiceState());
      historyServer.start();
      assertEquals(STATE.STARTED, historyServer.getServiceState());
      assertEquals(STATE.STARTED, clientService.getServiceState());
      historyServer.stop();
      assertEquals(STATE.STOPPED, historyServer.getServiceState());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * For several hadoop.http.filter.initializers configurations (the plain
     * AuthenticationFilterInitializer, the timeline variant, and both combined with and
     * without a space after the comma), starts the server and asserts the effective
     * config was normalized to just TimelineAuthenticationFilterInitializer.
     * NOTE(review): method name has a typo ("Filte"); left unchanged to keep the
     * public test name stable.
     */
    @Test(timeout=50000) public void testFilteOverrides() throws Exception { String[] filterInitializers={AuthenticationFilterInitializer.class.getName(),TimelineAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + "," + TimelineAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + ", " + TimelineAuthenticationFilterInitializer.class.getName()}; for ( String filterInitializer : filterInitializers) { historyServer=new ApplicationHistoryServer(); Configuration config=new YarnConfiguration(); config.set("hadoop.http.filter.initializers",filterInitializer); historyServer.init(config); historyServer.start(); Configuration tmp=historyServer.getConfig(); assertEquals(TimelineAuthenticationFilterInitializer.class.getName(),tmp.get("hadoop.http.filter.initializers")); historyServer.stop(); AHSWebApp.resetInstance(); } }

    UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Launches the history server through its main entry point. ExitUtil converts any
     * System.exit into an ExitException; if one is thrown, the test pins the status
     * (0 for diagnostics) and then fails, since launching must not exit at all.
     * @throws Exception on launch failure
     */
    @Test(timeout=60000)
    public void testLaunch() throws Exception {
      ExitUtil.disableSystemExit();
      try {
        historyServer = ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
      } catch (ExitUtil.ExitException e) {
        assertEquals(0, e.status);
        fail();
      } finally {
        // Fix: the reset previously ran only inside the catch block, so a successful
        // launch leaked ExitUtil's recorded/disabled-exit state into later tests.
        ExitUtil.resetFirstExitException();
      }
    }

    Class: org.apache.hadoop.yarn.server.applicationhistoryservice.TestMemoryApplicationHistoryStore

    APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises attempt-history ordering rules in the in-memory store: writing finish
     * data before start data must fail; five attempts written start-then-finish are all
     * retrievable with host/diagnostics equal to the attempt-id string; and re-writing
     * start or finish data for an already-stored attempt must fail with "is already
     * stored". Uses the fixture field {@code store}.
     */
    @Test public void testReadWriteApplicationAttemptHistory() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); try { writeApplicationAttemptFinishData(appAttemptId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is stored before the start information")); } int numAppAttempts=5; writeApplicationStartData(appId); for (int i=1; i <= numAppAttempts; ++i) { appAttemptId=ApplicationAttemptId.newInstance(appId,i); writeApplicationAttemptStartData(appAttemptId); writeApplicationAttemptFinishData(appAttemptId); } Assert.assertEquals(numAppAttempts,store.getApplicationAttempts(appId).size()); for (int i=1; i <= numAppAttempts; ++i) { appAttemptId=ApplicationAttemptId.newInstance(appId,i); ApplicationAttemptHistoryData data=store.getApplicationAttempt(appAttemptId); Assert.assertNotNull(data); Assert.assertEquals(appAttemptId.toString(),data.getHost()); Assert.assertEquals(appAttemptId.toString(),data.getDiagnosticsInfo()); } writeApplicationFinishData(appId); appAttemptId=ApplicationAttemptId.newInstance(appId,1); try { writeApplicationAttemptStartData(appAttemptId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is already stored")); } try { writeApplicationAttemptFinishData(appAttemptId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is already stored")); } }

    APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises application-history ordering rules in the in-memory store: finish
     * before start must fail; five applications written start-then-finish are all
     * listed and readable with name/diagnostics equal to the app-id string; and
     * duplicate start or finish writes must fail with "is already stored".
     * Uses the fixture field {@code store}.
     */
    @Test public void testReadWriteApplicationHistory() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); try { writeApplicationFinishData(appId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is stored before the start information")); } int numApps=5; for (int i=1; i <= numApps; ++i) { appId=ApplicationId.newInstance(0,i); writeApplicationStartData(appId); writeApplicationFinishData(appId); } Assert.assertEquals(numApps,store.getAllApplications().size()); for (int i=1; i <= numApps; ++i) { appId=ApplicationId.newInstance(0,i); ApplicationHistoryData data=store.getApplication(appId); Assert.assertNotNull(data); Assert.assertEquals(appId.toString(),data.getApplicationName()); Assert.assertEquals(appId.toString(),data.getDiagnosticsInfo()); } appId=ApplicationId.newInstance(0,1); try { writeApplicationStartData(appId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is already stored")); } try { writeApplicationFinishData(appId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is already stored")); } }

    APIUtilityVerifier IterativeVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises container-history ordering rules in the in-memory store: finish before
     * start must fail; five containers written start-then-finish are all retrievable
     * (priority equals the container id, diagnostics equals the id string); the AM
     * container resolves to container #1; and duplicate start/finish writes must fail
     * with "is already stored". Uses the fixture field {@code store}.
     */
    @Test public void testReadWriteContainerHistory() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId containerId=ContainerId.newInstance(appAttemptId,1); try { writeContainerFinishData(containerId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is stored before the start information")); } writeApplicationAttemptStartData(appAttemptId); int numContainers=5; for (int i=1; i <= numContainers; ++i) { containerId=ContainerId.newInstance(appAttemptId,i); writeContainerStartData(containerId); writeContainerFinishData(containerId); } Assert.assertEquals(numContainers,store.getContainers(appAttemptId).size()); for (int i=1; i <= numContainers; ++i) { containerId=ContainerId.newInstance(appAttemptId,i); ContainerHistoryData data=store.getContainer(containerId); Assert.assertNotNull(data); Assert.assertEquals(Priority.newInstance(containerId.getId()),data.getPriority()); Assert.assertEquals(containerId.toString(),data.getDiagnosticsInfo()); } ContainerHistoryData masterContainer=store.getAMContainer(appAttemptId); Assert.assertNotNull(masterContainer); Assert.assertEquals(ContainerId.newInstance(appAttemptId,1),masterContainer.getContainerId()); writeApplicationAttemptFinishData(appAttemptId); containerId=ContainerId.newInstance(appAttemptId,1); try { writeContainerStartData(containerId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is already stored")); } try { writeContainerFinishData(containerId); Assert.fail(); } catch ( IOException e) { Assert.assertTrue(e.getMessage().contains("is already stored")); } }

    Class: org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebApp

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Renders the AHS controller index page through a mock injector and verifies the
     * page title is set to "Application History".
     */
    @Test
    public void testAppControllerIndex() throws Exception {
      ApplicationHistoryManager manager = mock(ApplicationHistoryManager.class);
      Injector injector =
          WebAppTests.createMockInjector(ApplicationHistoryManager.class, manager);
      AHSController controller = injector.getInstance(AHSController.class);
      controller.index();
      Assert.assertEquals("Application History", controller.get(TITLE, "unknown"));
    }

    Class: org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.TestAHSWebServices

    InternalCallVerifier EqualityVerifier 
    /**
     * GET ws/v1/applicationhistory/apps/{appid} must return a JSON "app" object whose
     * id, name, diagnostics, queue, user, type, final status (UNDEFINED) and state
     * (FINISHED) match the fixture data written by the test setup.
     */
    @Test public void testSingleApp() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject app=json.getJSONObject("app"); assertEquals(appId.toString(),app.getString("appId")); assertEquals(appId.toString(),app.get("name")); assertEquals(appId.toString(),app.get("diagnosticsInfo")); assertEquals("test queue",app.get("queue")); assertEquals("test user",app.get("user")); assertEquals("test type",app.get("type")); assertEquals(FinalApplicationStatus.UNDEFINED.toString(),app.get("finalAppStatus")); assertEquals(YarnApplicationState.FINISHED.toString(),app.get("appState")); }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET .../apps/{appid}/appattempts must return a JSON object whose "appAttempt"
     * array contains all five recorded attempts.
     */
    @Test
    public void testMultipleAttempts() throws Exception {
      ApplicationId appId = ApplicationId.newInstance(0, 1);
      WebResource r = resource();
      ClientResponse response = r.path("ws").path("v1").path("applicationhistory")
          .path("apps").path(appId.toString()).path("appattempts")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject appAttempts = json.getJSONObject("appAttempts");
      assertEquals("incorrect number of elements", 1, appAttempts.length());
      JSONArray attemptArray = appAttempts.getJSONArray("appAttempt");
      assertEquals("incorrect number of elements", 5, attemptArray.length());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * GET .../apps/{appid}/appattempts/{attemptid}/containers must return a JSON
     * object whose "container" array contains all five recorded containers.
     */
    @Test public void testMultipleContainers() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(appAttemptId.toString()).path("containers").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject containers=json.getJSONObject("containers"); assertEquals("incorrect number of elements",1,containers.length()); JSONArray array=containers.getJSONArray("container"); assertEquals("incorrect number of elements",5,array.length()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Filtering .../apps by state=FINISHED must return all five finished applications.
     */
    @Test
    public void testAppsQuery() throws Exception {
      WebResource r = resource();
      ClientResponse response = r.path("ws").path("v1").path("applicationhistory")
          .path("apps").queryParam("state", YarnApplicationState.FINISHED.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject apps = json.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, apps.length());
      JSONArray appArray = apps.getJSONArray("app");
      assertEquals("incorrect number of elements", 5, appArray.length());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * GET .../containers/{containerid} must return a JSON "container" object with the
     * expected id, diagnostics, zero allocated resources, assigned node, priority
     * (equal to the container id), the fully-composed aggregated-log URL, and
     * COMPLETE state.
     */
    @Test public void testSingleContainer() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId containerId=ContainerId.newInstance(appAttemptId,1); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(appAttemptId.toString()).path("containers").path(containerId.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject container=json.getJSONObject("container"); assertEquals(containerId.toString(),container.getString("containerId")); assertEquals(containerId.toString(),container.getString("diagnosticsInfo")); assertEquals("0",container.getString("allocatedMB")); assertEquals("0",container.getString("allocatedVCores")); assertEquals(NodeId.newInstance("localhost",0).toString(),container.getString("assignedNodeId")); assertEquals(Priority.newInstance(containerId.getId()).toString(),container.getString("priority")); Configuration conf=new YarnConfiguration(); assertEquals(WebAppUtils.getHttpSchemePrefix(conf) + WebAppUtils.getAHSWebAppURLWithoutScheme(conf) + "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/"+ "container_0_0001_01_000001/test user",container.getString("logUrl")); assertEquals(ContainerState.COMPLETE.toString(),container.getString("containerState")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * GET .../appattempts/{attemptid} must return a JSON "appAttempt" object with the
     * expected attempt id, host and diagnostics (all the attempt-id string per the test
     * writer), tracking URL, and FINISHED state.
     */
    @Test public void testSingleAttempt() throws Exception { ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("applicationhistory").path("apps").path(appId.toString()).path("appattempts").path(appAttemptId.toString()).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject appAttempt=json.getJSONObject("appAttempt"); assertEquals(appAttemptId.toString(),appAttempt.getString("appAttemptId")); assertEquals(appAttemptId.toString(),appAttempt.getString("host")); assertEquals(appAttemptId.toString(),appAttempt.getString("diagnosticsInfo")); assertEquals("test tracking url",appAttempt.getString("trackingUrl")); assertEquals(YarnApplicationAttemptState.FINISHED.toString(),appAttempt.get("appAttemptState")); }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting the bare web root must yield 404 Not Found with no response body.
     */
    @Test
    public void testInvalidUri2() throws JSONException, Exception {
      WebResource r = resource();
      String body = "";
      try {
        body = r.accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting the service root with an unsupported Accept type (text/plain) must
     * yield 500 Internal Server Error with no response body.
     */
    @Test
    public void testInvalidAccept() throws JSONException, Exception {
      WebResource r = resource();
      String body = "";
      try {
        body = r.path("ws").path("v1").path("applicationhistory")
            .accept(MediaType.TEXT_PLAIN).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.INTERNAL_SERVER_ERROR, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting an unknown sub-path ("bogus") must yield 404 Not Found with no
     * response body.
     */
    @Test
    public void testInvalidUri() throws JSONException, Exception {
      WebResource r = resource();
      String body = "";
      try {
        body = r.path("ws").path("v1").path("applicationhistory").path("bogus")
            .accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
      }
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestContainerExecutor

    APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies ContainerExecutor.getRunCommand honors the configured scheduler priority
     * for both a positive (2) and a negative (-5) setting. On Windows the command is
     * winutils (priority handled elsewhere); on other platforms it must be
     * "nice -n &lt;priority&gt; ...".
     */
    @Test(timeout=5000) public void testRunCommandwithPriority() throws Exception { Configuration conf=new Configuration(); conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,2); String[] command=ContainerExecutor.getRunCommand("echo","group1",conf); if (Shell.WINDOWS) { assertEquals("first command should be the run command for the platform",Shell.WINUTILS,command[0]); } else { assertEquals("first command should be nice","nice",command[0]); assertEquals("second command should be -n","-n",command[1]); assertEquals("third command should be the priority",Integer.toString(2),command[2]); } conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,-5); command=ContainerExecutor.getRunCommand("echo","group1",conf); if (Shell.WINDOWS) { assertEquals("first command should be the run command for the platform",Shell.WINUTILS,command[0]); } else { assertEquals("first command should be nice","nice",command[0]); assertEquals("second command should be -n","-n",command[1]); assertEquals("third command should be the priority",Integer.toString(-5),command[2]); } }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestDefaultContainerExecutor

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that DefaultContainerExecutor creates the usercache, appcache,
     * filecache, per-app, and per-app log directories with its own explicit
     * permissions, even under a restrictive 077 umask.
     * @throws Exception on filesystem failures
     */
    @Test
    public void testDirPermissions() throws Exception {
      deleteTmpFiles();
      final String user = "somebody";
      final String appId = "app_12345_123";
      final FsPermission userCachePerm = new FsPermission(DefaultContainerExecutor.USER_PERM);
      final FsPermission appCachePerm = new FsPermission(DefaultContainerExecutor.APPCACHE_PERM);
      final FsPermission fileCachePerm = new FsPermission(DefaultContainerExecutor.FILECACHE_PERM);
      final FsPermission appDirPerm = new FsPermission(DefaultContainerExecutor.APPDIR_PERM);
      final FsPermission logDirPerm = new FsPermission(DefaultContainerExecutor.LOGDIR_PERM);
      // Typed lists instead of the original raw List/ArrayList declarations.
      List<String> localDirs = new ArrayList<String>();
      localDirs.add(new Path(BASE_TMP_PATH, "localDirA").toString());
      localDirs.add(new Path(BASE_TMP_PATH, "localDirB").toString());
      List<String> logDirs = new ArrayList<String>();
      logDirs.add(new Path(BASE_TMP_PATH, "logDirA").toString());
      logDirs.add(new Path(BASE_TMP_PATH, "logDirB").toString());
      Configuration conf = new Configuration();
      // Restrictive umask: the executor must still apply its explicit permissions.
      conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
      FileContext lfs = FileContext.getLocalFSFileContext(conf);
      DefaultContainerExecutor executor = new DefaultContainerExecutor(lfs);
      executor.init();
      try {
        executor.createUserLocalDirs(localDirs, user);
        executor.createUserCacheDirs(localDirs, user);
        executor.createAppDirs(localDirs, user, appId);
        for (String dir : localDirs) {
          FileStatus stats = lfs.getFileStatus(
              new Path(new Path(dir, ContainerLocalizer.USERCACHE), user));
          Assert.assertEquals(userCachePerm, stats.getPermission());
        }
        for (String dir : localDirs) {
          Path userCachePath = new Path(new Path(dir, ContainerLocalizer.USERCACHE), user);
          Path appCachePath = new Path(userCachePath, ContainerLocalizer.APPCACHE);
          FileStatus stats = lfs.getFileStatus(appCachePath);
          Assert.assertEquals(appCachePerm, stats.getPermission());
          stats = lfs.getFileStatus(new Path(userCachePath, ContainerLocalizer.FILECACHE));
          Assert.assertEquals(fileCachePerm, stats.getPermission());
          stats = lfs.getFileStatus(new Path(appCachePath, appId));
          Assert.assertEquals(appDirPerm, stats.getPermission());
        }
        executor.createAppLogDirs(appId, logDirs);
        for (String dir : logDirs) {
          FileStatus stats = lfs.getFileStatus(new Path(dir, appId));
          Assert.assertEquals(logDirPerm, stats.getPermission());
        }
      } finally {
        deleteTmpFiles();
      }
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestDirectoryCollection

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testDiskSpaceUtilizationLimit() throws IOException { String dirA=new File(testDir,"dirA").getPath(); String[] dirs={dirA}; DirectoryCollection dc=new DirectoryCollection(dirs,0.0F); dc.checkDirs(); Assert.assertEquals(0,dc.getGoodDirs().size()); Assert.assertEquals(1,dc.getFailedDirs().size()); dc=new DirectoryCollection(dirs,100.0F); dc.checkDirs(); Assert.assertEquals(1,dc.getGoodDirs().size()); Assert.assertEquals(0,dc.getFailedDirs().size()); dc=new DirectoryCollection(dirs,testDir.getTotalSpace() / (1024 * 1024)); dc.checkDirs(); Assert.assertEquals(0,dc.getGoodDirs().size()); Assert.assertEquals(1,dc.getFailedDirs().size()); dc=new DirectoryCollection(dirs,100.0F,0); dc.checkDirs(); Assert.assertEquals(1,dc.getGoodDirs().size()); Assert.assertEquals(0,dc.getFailedDirs().size()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testDiskLimitsCutoffSetters(){ String[] dirs={"dir"}; DirectoryCollection dc=new DirectoryCollection(dirs,0.0F,100); float testValue=57.5F; float delta=0.1F; dc.setDiskUtilizationPercentageCutoff(testValue); Assert.assertEquals(testValue,dc.getDiskUtilizationPercentageCutoff(),delta); testValue=-57.5F; dc.setDiskUtilizationPercentageCutoff(testValue); Assert.assertEquals(0.0F,dc.getDiskUtilizationPercentageCutoff(),delta); testValue=157.5F; dc.setDiskUtilizationPercentageCutoff(testValue); Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta); long spaceValue=57; dc.setDiskUtilizationSpaceCutoff(spaceValue); Assert.assertEquals(spaceValue,dc.getDiskUtilizationSpaceCutoff()); spaceValue=-57; dc.setDiskUtilizationSpaceCutoff(spaceValue); Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testConstructors(){ String[] dirs={"dir"}; float delta=0.1F; DirectoryCollection dc=new DirectoryCollection(dirs); Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta); Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff()); dc=new DirectoryCollection(dirs,57.5F); Assert.assertEquals(57.5F,dc.getDiskUtilizationPercentageCutoff(),delta); Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff()); dc=new DirectoryCollection(dirs,57); Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta); Assert.assertEquals(57,dc.getDiskUtilizationSpaceCutoff()); dc=new DirectoryCollection(dirs,57.5F,67); Assert.assertEquals(57.5F,dc.getDiskUtilizationPercentageCutoff(),delta); Assert.assertEquals(67,dc.getDiskUtilizationSpaceCutoff()); dc=new DirectoryCollection(dirs,-57.5F,-67); Assert.assertEquals(0.0F,dc.getDiskUtilizationPercentageCutoff(),delta); Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff()); dc=new DirectoryCollection(dirs,157.5F,-67); Assert.assertEquals(100.0F,dc.getDiskUtilizationPercentageCutoff(),delta); Assert.assertEquals(0,dc.getDiskUtilizationSpaceCutoff()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testCreateDirectories() throws IOException { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077"); FileContext localFs=FileContext.getLocalFSFileContext(conf); String dirA=new File(testDir,"dirA").getPath(); String dirB=new File(dirA,"dirB").getPath(); String dirC=new File(testDir,"dirC").getPath(); Path pathC=new Path(dirC); FsPermission permDirC=new FsPermission((short)0710); localFs.mkdir(pathC,null,true); localFs.setPermission(pathC,permDirC); String[] dirs={dirA,dirB,dirC}; DirectoryCollection dc=new DirectoryCollection(dirs,conf.getFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE)); FsPermission defaultPerm=FsPermission.getDefault().applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK)); boolean createResult=dc.createNonExistentDirs(localFs,defaultPerm); Assert.assertTrue(createResult); FileStatus status=localFs.getFileStatus(new Path(dirA)); Assert.assertEquals("local dir parent not created with proper permissions",defaultPerm,status.getPermission()); status=localFs.getFileStatus(new Path(dirB)); Assert.assertEquals("local dir not created with proper permissions",defaultPerm,status.getPermission()); status=localFs.getFileStatus(pathC); Assert.assertEquals("existing local directory permissions modified",permDirC,status.getPermission()); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestLinuxContainerExecutor

    APIUtilityVerifier EqualityVerifier 
    @Test public void testContainerLaunch() throws IOException { if (!shouldRun()) { return; } File touchFile=new File(workSpace,"touch-file"); int ret=runAndBlock("touch",touchFile.getAbsolutePath()); assertEquals(0,ret); FileStatus fileStatus=FileContext.getLocalFSFileContext().getFileStatus(new Path(touchFile.getAbsolutePath())); assertEquals(appSubmitter,fileStatus.getOwner()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testLocalUser() throws Exception { try { Configuration conf=new YarnConfiguration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"simple"); UserGroupInformation.setConfiguration(conf); LinuxContainerExecutor lce=new LinuxContainerExecutor(); lce.setConf(conf); Assert.assertEquals(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,lce.getRunAsUser("foo")); conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,"bar"); lce=new LinuxContainerExecutor(); lce.setConf(conf); Assert.assertEquals("bar",lce.getRunAsUser("foo")); conf.set(YarnConfiguration.NM_NONSECURE_MODE_LOCAL_USER_KEY,"bar"); conf.setBoolean(YarnConfiguration.NM_NONSECURE_MODE_LIMIT_USERS,false); lce=new LinuxContainerExecutor(); lce.setConf(conf); Assert.assertEquals("foo",lce.getRunAsUser("foo")); conf=new YarnConfiguration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); lce=new LinuxContainerExecutor(); lce.setConf(conf); Assert.assertEquals("foo",lce.getRunAsUser("foo")); } finally { Configuration conf=new YarnConfiguration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"simple"); UserGroupInformation.setConfiguration(conf); } }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestLinuxContainerExecutorWithMocks

    InternalCallVerifier EqualityVerifier 
    @Test public void testContainerLaunch() throws IOException { String appSubmitter="nobody"; String cmd=String.valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue()); String appId="APP_ID"; String containerId="CONTAINER_ID"; Container container=mock(Container.class); ContainerId cId=mock(ContainerId.class); ContainerLaunchContext context=mock(ContainerLaunchContext.class); HashMap env=new HashMap(); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(context); when(cId.toString()).thenReturn(containerId); when(context.getEnvironment()).thenReturn(env); Path scriptPath=new Path("file:///bin/echo"); Path tokensPath=new Path("file:///dev/null"); Path workDir=new Path("/tmp"); Path pidFile=new Path(workDir,"pid.txt"); mockExec.activateContainer(cId,pidFile); int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,dirsHandler.getLocalDirs(),dirsHandler.getLogDirs()); assertEquals(0,ret); assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,appId,containerId,workDir.toString(),"/bin/echo","/dev/null",pidFile.toString(),StringUtils.join(",",dirsHandler.getLocalDirs()),StringUtils.join(",",dirsHandler.getLogDirs()),"cgroups=none"),readMockParams()); }

    EqualityVerifier 
    @Test public void testDeleteAsUser() throws IOException { String appSubmitter="nobody"; String cmd=String.valueOf(LinuxContainerExecutor.Commands.DELETE_AS_USER.getValue()); Path dir=new Path("/tmp/testdir"); mockExec.deleteAsUser(appSubmitter,dir); assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,"/tmp/testdir"),readMockParams()); }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=5000) public void testStartLocalizer() throws IOException { InetSocketAddress address=InetSocketAddress.createUnresolved("localhost",8040); Path nmPrivateCTokensPath=new Path("file:///bin/nmPrivateCTokensPath"); try { mockExec.startLocalizer(nmPrivateCTokensPath,address,"test","application_0","12345",dirsHandler.getLocalDirs(),dirsHandler.getLogDirs()); List result=readMockParams(); Assert.assertEquals(result.size(),17); Assert.assertEquals(result.get(0),YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER); Assert.assertEquals(result.get(1),"test"); Assert.assertEquals(result.get(2),"0"); Assert.assertEquals(result.get(3),"application_0"); Assert.assertEquals(result.get(4),"/bin/nmPrivateCTokensPath"); Assert.assertEquals(result.get(8),"-classpath"); Assert.assertEquals(result.get(11),"org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer"); Assert.assertEquals(result.get(12),"test"); Assert.assertEquals(result.get(13),"application_0"); Assert.assertEquals(result.get(14),"12345"); Assert.assertEquals(result.get(15),"localhost"); Assert.assertEquals(result.get(16),"8040"); } catch ( InterruptedException e) { LOG.error("Error:" + e.getMessage(),e); Assert.fail(); } }

    EqualityVerifier 
    @Test(timeout=5000) public void testContainerLaunchWithPriority() throws IOException { File f=new File("./src/test/resources/mock-container-executor"); if (!FileUtil.canExecute(f)) { FileUtil.setExecutable(f,true); } String executorPath=f.getAbsolutePath(); Configuration conf=new Configuration(); conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,executorPath); conf.setInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY,2); mockExec.setConf(conf); List command=new ArrayList(); mockExec.addSchedPriorityCommand(command); assertEquals("first should be nice","nice",command.get(0)); assertEquals("second should be -n","-n",command.get(1)); assertEquals("third should be the priority",Integer.toString(2),command.get(2)); testContainerLaunch(); }

    InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    // Points the executor at a mock script that fails ("badcommand"), then
    // launches a mocked container.  Expects a non-zero exit code, that both
    // the logged output and the ContainerDiagnosticsUpdateEvent carry the
    // "badcommand" diagnostics (checked inside the doAnswer callbacks), and
    // that the argv handed to the executor is still the full launch vector.
    @Test public void testContainerLaunchError() throws IOException { File f=new File("./src/test/resources/mock-container-executer-with-error"); if (!FileUtil.canExecute(f)) { FileUtil.setExecutable(f,true); } String executorPath=f.getAbsolutePath(); Configuration conf=new Configuration(); conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH,executorPath); conf.set(YarnConfiguration.NM_LOCAL_DIRS,"file:///bin/echo"); conf.set(YarnConfiguration.NM_LOG_DIRS,"file:///dev/null"); mockExec=spy(new LinuxContainerExecutor()); doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable { String diagnostics=(String)invocationOnMock.getArguments()[0]; assertTrue("Invalid Diagnostics message: " + diagnostics,diagnostics.contains("badcommand")); return null; } } ).when(mockExec).logOutput(any(String.class)); dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); mockExec.setConf(conf); String appSubmitter="nobody"; String cmd=String.valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue()); String appId="APP_ID"; String containerId="CONTAINER_ID"; Container container=mock(Container.class); ContainerId cId=mock(ContainerId.class); ContainerLaunchContext context=mock(ContainerLaunchContext.class); HashMap env=new HashMap(); when(container.getContainerId()).thenReturn(cId); when(container.getLaunchContext()).thenReturn(context); doAnswer(new Answer(){ @Override public Object answer( InvocationOnMock invocationOnMock) throws Throwable { ContainerDiagnosticsUpdateEvent event=(ContainerDiagnosticsUpdateEvent)invocationOnMock.getArguments()[0]; assertTrue("Invalid Diagnostics message: " + event.getDiagnosticsUpdate(),event.getDiagnosticsUpdate().contains("badcommand")); return null; } } ).when(container).handle(any(ContainerDiagnosticsUpdateEvent.class)); when(cId.toString()).thenReturn(containerId); when(context.getEnvironment()).thenReturn(env); Path scriptPath=new Path("file:///bin/echo"); Path 
tokensPath=new Path("file:///dev/null"); Path workDir=new Path("/tmp"); Path pidFile=new Path(workDir,"pid.txt"); mockExec.activateContainer(cId,pidFile); int ret=mockExec.launchContainer(container,scriptPath,tokensPath,appSubmitter,appId,workDir,dirsHandler.getLocalDirs(),dirsHandler.getLogDirs()); Assert.assertNotSame(0,ret); assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,appId,containerId,workDir.toString(),"/bin/echo","/dev/null",pidFile.toString(),StringUtils.join(",",dirsHandler.getLocalDirs()),StringUtils.join(",",dirsHandler.getLogDirs()),"cgroups=none"),readMockParams()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testContainerKill() throws IOException { String appSubmitter="nobody"; String cmd=String.valueOf(LinuxContainerExecutor.Commands.SIGNAL_CONTAINER.getValue()); ContainerExecutor.Signal signal=ContainerExecutor.Signal.QUIT; String sigVal=String.valueOf(signal.getValue()); mockExec.signalContainer(appSubmitter,"1000",signal); assertEquals(Arrays.asList(YarnConfiguration.DEFAULT_NM_NONSECURE_MODE_LOCAL_USER,appSubmitter,cmd,"1000",sigVal),readMockParams()); }

    EqualityVerifier 
    @Test public void testInit() throws Exception { mockExec.init(); assertEquals(Arrays.asList("--checksetup"),readMockParams()); }

    EqualityVerifier 
    @Test(timeout=5000) public void testLaunchCommandWithoutPriority() throws IOException { List command=new ArrayList(); mockExec.addSchedPriorityCommand(command); assertEquals("addSchedPriority should be empty",0,command.size()); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestLocalDirsHandlerService

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testValidPathsDirHandlerService(){ Configuration conf=new YarnConfiguration(); String localDir1=new File("file:///" + testDir,"localDir1").getPath(); String localDir2=new File("hdfs:///" + testDir,"localDir2").getPath(); conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir1 + "," + localDir2); String logDir1=new File("file:///" + testDir,"logDir1").getPath(); conf.set(YarnConfiguration.NM_LOG_DIRS,logDir1); LocalDirsHandlerService dirSvc=new LocalDirsHandlerService(); try { dirSvc.init(conf); Assert.fail("Service should have thrown an exception due to wrong URI"); } catch ( YarnRuntimeException e) { } Assert.assertEquals("Service should not be inited",STATE.STOPPED,dirSvc.getServiceState()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testDirStructure() throws Exception { Configuration conf=new YarnConfiguration(); String localDir1=new File("file:///" + testDir,"localDir1").getPath(); conf.set(YarnConfiguration.NM_LOCAL_DIRS,localDir1); String logDir1=new File("file:///" + testDir,"logDir1").getPath(); conf.set(YarnConfiguration.NM_LOG_DIRS,logDir1); LocalDirsHandlerService dirSvc=new LocalDirsHandlerService(); dirSvc.init(conf); Assert.assertEquals(1,dirSvc.getLocalDirs().size()); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestNMAuditLogger

    EqualityVerifier 
    /** * Test the AuditLog format with key-val pair. */ @Test public void testKeyValLogFormat() throws Exception { StringBuilder actLog=new StringBuilder(); StringBuilder expLog=new StringBuilder(); NMAuditLogger.start(Keys.USER,USER,actLog); expLog.append("USER=test"); assertEquals(expLog.toString(),actLog.toString()); NMAuditLogger.add(Keys.OPERATION,OPERATION,actLog); expLog.append("\tOPERATION=oper"); assertEquals(expLog.toString(),actLog.toString()); NMAuditLogger.add(Keys.APPID,(String)null,actLog); expLog.append("\tAPPID=null"); assertEquals(expLog.toString(),actLog.toString()); NMAuditLogger.add(Keys.TARGET,TARGET,actLog); expLog.append("\tTARGET=tgt"); assertEquals(expLog.toString(),actLog.toString()); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestNodeManagerReboot

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // End-to-end NM restart test: starts a NodeManager, launches a container
    // that localizes a resource (creating usercache/ and nm_private/ content),
    // waits (polling, max 20 * 500ms) for the container to reach DONE, then
    // restarts the NM and polls until all usercache/filecache/nm_private dirs
    // are gone.  Finally verifies the DeletionService was asked exactly once
    // for each renamed "_DEL_" directory and file-deletion task.
    @Test(timeout=2000000) public void testClearLocalDirWhenNodeReboot() throws IOException, YarnException, InterruptedException { nm=new MyNodeManager(); nm.start(); final ContainerManagementProtocol containerManager=nm.getContainerManager(); createFiles(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE,100); localResourceDir.mkdirs(); ContainerLaunchContext containerLaunchContext=Records.newRecord(ContainerLaunchContext.class); ContainerId cId=createContainerId(); URL localResourceUri=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(localResourceDir.getAbsolutePath()))); LocalResource localResource=LocalResource.newInstance(localResourceUri,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,-1,localResourceDir.lastModified()); String destinationFile="dest_file"; Map localResources=new HashMap(); localResources.put(destinationFile,localResource); containerLaunchContext.setLocalResources(localResources); List commands=new ArrayList(); containerLaunchContext.setCommands(commands); NodeId nodeId=nm.getNMContext().getNodeId(); StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,TestContainerManager.createContainerToken(cId,0,nodeId,destinationFile,nm.getNMContext().getContainerTokenSecretManager())); List list=new ArrayList(); list.add(scRequest); final StartContainersRequest allRequests=StartContainersRequest.newInstance(list); final UserGroupInformation currentUser=UserGroupInformation.createRemoteUser(cId.getApplicationAttemptId().toString()); NMTokenIdentifier nmIdentifier=new NMTokenIdentifier(cId.getApplicationAttemptId(),nodeId,user,123); currentUser.addTokenIdentifier(nmIdentifier); currentUser.doAs(new PrivilegedExceptionAction(){ @Override public Void run() throws YarnException, IOException { nm.getContainerManager().startContainers(allRequests); return null; } } ); List containerIds=new ArrayList(); containerIds.add(cId); GetContainerStatusesRequest 
request=GetContainerStatusesRequest.newInstance(containerIds); Container container=nm.getNMContext().getContainers().get(request.getContainerIds().get(0)); final int MAX_TRIES=20; int numTries=0; while (!container.getContainerState().equals(ContainerState.DONE) && numTries <= MAX_TRIES) { try { Thread.sleep(500); } catch ( InterruptedException ex) { } numTries++; } Assert.assertEquals(ContainerState.DONE,container.getContainerState()); Assert.assertTrue("The container should create a subDir named currentUser: " + user + "under localDir/usercache",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0); Assert.assertTrue("There should be files or Dirs under nm_private when " + "container is launched",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0); nm.stop(); nm=new MyNodeManager(); nm.start(); numTries=0; while ((numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) > 0 || numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) > 0) && numTries < MAX_TRIES) { try { Thread.sleep(500); } catch ( InterruptedException ex) { } numTries++; } Assert.assertTrue("After NM reboots, all local files should be deleted",numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.USERCACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ContainerLocalizer.FILECACHE) == 0 && numOfLocalDirs(nmLocalDir.getAbsolutePath(),ResourceLocalizationService.NM_PRIVATE_DIR) == 0); verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"))); verify(delService,times(1)).delete((String)isNull(),argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_"))); verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(user,null,new String[]{destinationFile}))); 
verify(delService,times(1)).scheduleFileDeletionTask(argThat(new FileDeletionInclude(null,ContainerLocalizer.USERCACHE + "_DEL_",new String[]{}))); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestNodeManagerResync

    BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // Verifies resync behavior: on the first registration the NM has no
    // containers to report (the mock tracker then injects a COMPLETE
    // container into the NM context); on the re-registration triggered by the
    // RESYNC heartbeat response the NM must report that completed container.
    // Assertions inside the anonymous tracker run on the NM's threads, so
    // failures are recorded in assertionFailedInThread and checked at the end.
    @Test public void testNMSentContainerStatusOnResync() throws Exception { final ContainerStatus testCompleteContainer=TestNodeStatusUpdater.createContainerStatus(2,ContainerState.COMPLETE); final Container container=TestNodeStatusUpdater.getMockContainer(testCompleteContainer); NMContainerStatus report=createNMContainerStatus(2,ContainerState.COMPLETE); when(container.getNMContainerStatus()).thenReturn(report); NodeManager nm=new NodeManager(){ int registerCount=0; @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ return new TestNodeStatusUpdaterResync(context,dispatcher,healthChecker,metrics){ @Override protected ResourceTracker createResourceTracker(){ return new MockResourceTracker(){ @Override public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnException, IOException { if (registerCount == 0) { try { Assert.assertEquals(0,request.getNMContainerStatuses().size()); } catch ( AssertionError error) { error.printStackTrace(); assertionFailedInThread.set(true); } getNMContext().getContainers().put(testCompleteContainer.getContainerId(),container); } else { List statuses=request.getNMContainerStatuses(); try { Assert.assertEquals(1,statuses.size()); Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId()); } catch ( AssertionError error) { error.printStackTrace(); assertionFailedInThread.set(true); } } registerCount++; return super.registerNodeManager(request); } @Override public NodeHeartbeatResponse nodeHeartbeat( NodeHeartbeatRequest request){ List statuses=request.getNodeStatus().getContainersStatuses(); try { Assert.assertEquals(1,statuses.size()); Assert.assertEquals(testCompleteContainer.getContainerId(),statuses.get(0).getContainerId()); } catch ( AssertionError error) { error.printStackTrace(); assertionFailedInThread.set(true); } return 
YarnServerBuilderUtils.newNodeHeartbeatResponse(1,NodeAction.RESYNC,null,null,null,null,1000L); } } ; } } ; } } ; YarnConfiguration conf=createNMConfig(); nm.init(conf); nm.start(); try { syncBarrier.await(); } catch ( BrokenBarrierException e) { } Assert.assertFalse(assertionFailedInThread.get()); nm.stop(); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @SuppressWarnings("unchecked") @Test(timeout=10000) public void testNMshutdownWhenResyncThrowException() throws IOException, InterruptedException, YarnException { NodeManager nm=new TestNodeManager3(); YarnConfiguration conf=createNMConfig(); nm.init(conf); nm.start(); Assert.assertEquals(1,((TestNodeManager3)nm).getNMRegistrationCount()); nm.getNMDispatcher().getEventHandler().handle(new NodeManagerEvent(NodeManagerEventType.RESYNC)); synchronized (isNMShutdownCalled) { while (isNMShutdownCalled.get() == false) { try { isNMShutdownCalled.wait(); } catch ( InterruptedException e) { } } } Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get()); nm.stop(); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testNodeDecommision() throws Exception { nm=getNodeManager(NodeAction.SHUTDOWN); YarnConfiguration conf=createNMConfig(); nm.init(conf); Assert.assertEquals(STATE.INITED,nm.getServiceState()); nm.start(); int waitCount=0; while (heartBeatID < 1 && waitCount++ != 200) { Thread.sleep(500); } Assert.assertFalse(heartBeatID < 1); Assert.assertTrue(nm.getNMContext().getDecommissioned()); waitCount=0; while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) { LOG.info("Waiting for NM to stop.."); Thread.sleep(1000); } Assert.assertEquals(STATE.STOPPED,nm.getServiceState()); }

    IterativeVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    @Test public void testNMRegistration() throws InterruptedException { nm=new NodeManager(){ @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ return new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics); } } ; YarnConfiguration conf=createNMConfig(); nm.init(conf); Object[] services=nm.getServices().toArray(); Object lastService=services[services.length - 1]; Assert.assertTrue("last service is NOT the node status updater",lastService instanceof NodeStatusUpdater); new Thread(){ public void run(){ try { nm.start(); } catch ( Throwable e) { TestNodeStatusUpdater.this.nmStartError=e; throw new YarnRuntimeException(e); } } } .start(); System.out.println(" ----- thread already started.." + nm.getServiceState()); int waitCount=0; while (nm.getServiceState() == STATE.INITED && waitCount++ != 50) { LOG.info("Waiting for NM to start.."); if (nmStartError != null) { LOG.error("Error during startup. ",nmStartError); Assert.fail(nmStartError.getCause().getMessage()); } Thread.sleep(2000); } if (nm.getServiceState() != STATE.STARTED) { Assert.fail("NodeManager failed to start"); } waitCount=0; while (heartBeatID <= 3 && waitCount++ != 200) { Thread.sleep(1000); } Assert.assertFalse(heartBeatID <= 3); Assert.assertEquals("Number of registered NMs is wrong!!",1,this.registeredNodes.size()); nm.stop(); }

    InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    @Test public void testApplicationKeepAlive() throws Exception { MyNodeManager nm=new MyNodeManager(); try { YarnConfiguration conf=createNMConfig(); conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true); conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,4000l); nm.init(conf); nm.start(); while (heartBeatID < 12) { Thread.sleep(1000l); } MyResourceTracker3 rt=(MyResourceTracker3)nm.getNodeStatusUpdater().getRMClient(); rt.context.getApplications().remove(rt.appId); Assert.assertEquals(1,rt.keepAliveRequests.size()); int numKeepAliveRequests=rt.keepAliveRequests.get(rt.appId).size(); LOG.info("Number of Keep Alive Requests: [" + numKeepAliveRequests + "]"); Assert.assertTrue(numKeepAliveRequests == 2 || numKeepAliveRequests == 3); while (heartBeatID < 20) { Thread.sleep(1000l); } int numKeepAliveRequests2=rt.keepAliveRequests.get(rt.appId).size(); Assert.assertEquals(numKeepAliveRequests,numKeepAliveRequests2); } finally { if (nm.getServiceState() == STATE.STARTED) nm.stop(); } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testStopReentrant() throws Exception { final AtomicInteger numCleanups=new AtomicInteger(0); nm=new NodeManager(){ @Override protected NodeStatusUpdater createNodeStatusUpdater( Context context, Dispatcher dispatcher, NodeHealthCheckerService healthChecker){ MyNodeStatusUpdater myNodeStatusUpdater=new MyNodeStatusUpdater(context,dispatcher,healthChecker,metrics); MyResourceTracker2 myResourceTracker2=new MyResourceTracker2(); myResourceTracker2.heartBeatNodeAction=NodeAction.SHUTDOWN; myNodeStatusUpdater.resourceTracker=myResourceTracker2; return myNodeStatusUpdater; } @Override protected ContainerManagerImpl createContainerManager( Context context, ContainerExecutor exec, DeletionService del, NodeStatusUpdater nodeStatusUpdater, ApplicationACLsManager aclsManager, LocalDirsHandlerService dirsHandler){ return new ContainerManagerImpl(context,exec,del,nodeStatusUpdater,metrics,aclsManager,dirsHandler){ @Override public void cleanUpApplicationsOnNMShutDown(){ super.cleanUpApplicationsOnNMShutDown(); numCleanups.incrementAndGet(); } } ; } } ; YarnConfiguration conf=createNMConfig(); nm.init(conf); nm.start(); int waitCount=0; while (heartBeatID < 1 && waitCount++ != 200) { Thread.sleep(500); } Assert.assertFalse(heartBeatID < 1); nm.stop(); waitCount=0; while (nm.getServiceState() != STATE.STOPPED && waitCount++ != 20) { LOG.info("Waiting for NM to stop.."); Thread.sleep(1000); } Assert.assertEquals(STATE.STOPPED,nm.getServiceState()); Assert.assertEquals(numCleanups.get(),1); }

    Class: org.apache.hadoop.yarn.server.nodemanager.TestRecordFactory

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testPbRecordFactory(){ RecordFactory pbRecordFactory=RecordFactoryPBImpl.get(); try { LocalizerHeartbeatResponse response=pbRecordFactory.newRecordInstance(LocalizerHeartbeatResponse.class); Assert.assertEquals(LocalizerHeartbeatResponsePBImpl.class,response.getClass()); } catch ( YarnRuntimeException e) { e.printStackTrace(); Assert.fail("Failed to crete record"); } }

    Class: org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.TestPBLocalizerRPC

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testLocalizerRPC() throws Exception { InetSocketAddress locAddr=new InetSocketAddress("0.0.0.0",8040); LocalizerService server=new LocalizerService(locAddr); try { server.start(); Configuration conf=new Configuration(); YarnRPC rpc=YarnRPC.create(conf); LocalizationProtocol client=(LocalizationProtocol)rpc.getProxy(LocalizationProtocol.class,locAddr,conf); LocalizerStatus status=recordFactory.newRecordInstance(LocalizerStatus.class); status.setLocalizerId("localizer0"); LocalizerHeartbeatResponse response=client.heartbeat(status); assertEquals(dieHBResponse(),response); } finally { server.stop(); } assertTrue(true); }

    Class: org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.impl.pb.TestPBRecordImpl

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) public void testLocalizerHeartbeatResponseSerDe() throws Exception { LocalizerHeartbeatResponse rsrcS=createLocalizerHeartbeatResponse(); assertTrue(rsrcS instanceof LocalizerHeartbeatResponsePBImpl); LocalizerHeartbeatResponsePBImpl rsrcPb=(LocalizerHeartbeatResponsePBImpl)rsrcS; DataOutputBuffer out=new DataOutputBuffer(); rsrcPb.getProto().writeDelimitedTo(out); DataInputBuffer in=new DataInputBuffer(); in.reset(out.getData(),0,out.getLength()); LocalizerHeartbeatResponseProto rsrcPbD=LocalizerHeartbeatResponseProto.parseDelimitedFrom(in); assertNotNull(rsrcPbD); LocalizerHeartbeatResponse rsrcD=new LocalizerHeartbeatResponsePBImpl(rsrcPbD); assertEquals(rsrcS,rsrcD); assertEquals(createResource(),rsrcS.getResourceSpecs().get(0).getResource()); assertEquals(createResource(),rsrcD.getResourceSpecs().get(0).getResource()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=10000) public void testSerializedExceptionDeSer() throws Exception { YarnException yarnEx=new YarnException("Yarn_Exception"); SerializedException serEx=SerializedException.newInstance(yarnEx); Throwable throwable=serEx.deSerialize(); Assert.assertEquals(yarnEx.getClass(),throwable.getClass()); Assert.assertEquals(yarnEx.getMessage(),throwable.getMessage()); IOException ioe=new IOException("Test_IOException"); RuntimeException runtimeException=new RuntimeException("Test_RuntimeException",ioe); YarnException yarnEx2=new YarnException("Test_YarnException",runtimeException); SerializedException serEx2=SerializedException.newInstance(yarnEx2); Throwable throwable2=serEx2.deSerialize(); throwable2.printStackTrace(); Assert.assertEquals(yarnEx2.getClass(),throwable2.getClass()); Assert.assertEquals(yarnEx2.getMessage(),throwable2.getMessage()); Assert.assertEquals(runtimeException.getClass(),throwable2.getCause().getClass()); Assert.assertEquals(runtimeException.getMessage(),throwable2.getCause().getMessage()); Assert.assertEquals(ioe.getClass(),throwable2.getCause().getCause().getClass()); Assert.assertEquals(ioe.getMessage(),throwable2.getCause().getCause().getMessage()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Round-trips a LocalizerStatus through its length-delimited protobuf
     * form and checks equality plus the localizer id and first resource
     * status on both copies.
     */
    @Test(timeout=10000)
    public void testLocalizerStatusSerDe() throws Exception {
      LocalizerStatus original = createLocalizerStatus();
      assertTrue(original instanceof LocalizerStatusPBImpl);
      LocalizerStatusPBImpl pbImpl = (LocalizerStatusPBImpl) original;

      // Write the proto with a length prefix, then parse it back.
      DataOutputBuffer serialized = new DataOutputBuffer();
      pbImpl.getProto().writeDelimitedTo(serialized);
      DataInputBuffer wire = new DataInputBuffer();
      wire.reset(serialized.getData(), 0, serialized.getLength());
      LocalizerStatusProto parsed =
          LocalizerStatusProto.parseDelimitedFrom(wire);
      assertNotNull(parsed);

      LocalizerStatus roundTripped = new LocalizerStatusPBImpl(parsed);
      assertEquals(original, roundTripped);
      assertEquals("localizer0", original.getLocalizerId());
      assertEquals("localizer0", roundTripped.getLocalizerId());
      assertEquals(createLocalResourceStatus(), original.getResourceStatus(0));
      assertEquals(createLocalResourceStatus(), roundTripped.getResourceStatus(0));
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Round-trips a LocalResourceStatus through its length-delimited
     * protobuf form and checks equality plus the resource field on both
     * copies.
     */
    @Test(timeout=10000)
    public void testLocalResourceStatusSerDe() throws Exception {
      LocalResourceStatus original = createLocalResourceStatus();
      assertTrue(original instanceof LocalResourceStatusPBImpl);
      LocalResourceStatusPBImpl pbImpl = (LocalResourceStatusPBImpl) original;

      // Write the proto with a length prefix, then parse it back.
      DataOutputBuffer serialized = new DataOutputBuffer();
      pbImpl.getProto().writeDelimitedTo(serialized);
      DataInputBuffer wire = new DataInputBuffer();
      wire.reset(serialized.getData(), 0, serialized.getLength());
      LocalResourceStatusProto parsed =
          LocalResourceStatusProto.parseDelimitedFrom(wire);
      assertNotNull(parsed);

      LocalResourceStatus roundTripped = new LocalResourceStatusPBImpl(parsed);
      assertEquals(original, roundTripped);
      assertEquals(createResource(), original.getResource());
      assertEquals(createResource(), roundTripped.getResource());
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.TestAuxServices

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Stopping one auxiliary service out from under AuxServices must
     * cascade: AuxServices itself transitions to STOPPED and drops all
     * registered services.
     */
    @Test
    public void testAuxUnexpectedStop() {
      Configuration conf = new Configuration();
      conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
      conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
          ServiceA.class, Service.class);
      conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
          ServiceB.class, Service.class);
      final AuxServices aux = new AuxServices();
      aux.init(conf);
      aux.start();

      // Kill an arbitrary registered service directly.
      Service victim = aux.getServices().iterator().next();
      victim.stop();

      assertEquals("Auxiliary service stopped, but AuxService unaffected.",
          STOPPED, aux.getServiceState());
      assertTrue(aux.getServices().isEmpty());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end event dispatch through AuxServices: sends an
     * APPLICATION_INIT for "Asrv" and an APPLICATION_STOP for "Bsrv", then
     * CONTAINER_INIT/CONTAINER_STOP for a container, and verifies each
     * LightService recorded the stopped app id (66) and the container's
     * id/resource from both container events. The container fields are
     * reset to null between CONTAINER_INIT and CONTAINER_STOP so the second
     * group of assertions proves the STOP event was delivered, not just
     * left-over state from INIT.
     */
    @Test public void testAuxEventDispatch(){ Configuration conf=new Configuration(); conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"}); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class); conf.setInt("A.expected.init",1); conf.setInt("B.expected.stop",1); final AuxServices aux=new AuxServices(); aux.init(conf); aux.start(); ApplicationId appId1=ApplicationId.newInstance(0,65); ByteBuffer buf=ByteBuffer.allocate(6); buf.putChar('A'); buf.putInt(65); buf.flip(); AuxServicesEvent event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT,"user0",appId1,"Asrv",buf); aux.handle(event); ApplicationId appId2=ApplicationId.newInstance(0,66); event=new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,"user0",appId2,"Bsrv",null); aux.handle(event); Collection servs=aux.getServices(); for ( AuxiliaryService serv : servs) { ArrayList appIds=((LightService)serv).getAppIdsStopped(); assertEquals("app not properly stopped",1,appIds.size()); assertTrue("wrong app stopped",appIds.contains((Integer)66)); } for ( AuxiliaryService serv : servs) { assertNull(((LightService)serv).containerId); assertNull(((LightService)serv).resource); } ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId1,1); ContainerTokenIdentifier cti=new ContainerTokenIdentifier(ContainerId.newInstance(attemptId,1),"","",Resource.newInstance(1,1),0,0,0,Priority.newInstance(0),0); Container container=new ContainerImpl(null,null,null,null,null,null,cti); ContainerId containerId=container.getContainerId(); Resource resource=container.getResource(); event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container); aux.handle(event); for ( AuxiliaryService serv : servs) { assertEquals(containerId,((LightService)serv).containerId); assertEquals(resource,((LightService)serv).resource); 
((LightService)serv).containerId=null; ((LightService)serv).resource=null; } event=new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP,container); aux.handle(event); for ( AuxiliaryService serv : servs) { assertEquals(containerId,((LightService)serv).containerId); assertEquals(resource,((LightService)serv).resource); } }

    BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Basic lifecycle: both configured aux services are created and follow
     * AuxServices through INITED, STARTED, and STOPPED.
     */
    @Test
    public void testAuxServices() {
      Configuration conf = new Configuration();
      conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
      conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
          ServiceA.class, Service.class);
      conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
          ServiceB.class, Service.class);
      final AuxServices aux = new AuxServices();
      aux.init(conf);

      // Multiply a distinct prime per service type; a product of exactly 6
      // (2*3) proves there is exactly one ServiceA and one ServiceB.
      int mix = 1;
      for (Service s : aux.getServices()) {
        assertEquals(INITED, s.getServiceState());
        if (s instanceof ServiceA) {
          mix *= 2;
        } else if (s instanceof ServiceB) {
          mix *= 3;
        } else {
          fail("Unexpected service type " + s.getClass());
        }
      }
      assertEquals("Invalid mix of services", 6, mix);

      aux.start();
      for (Service s : aux.getServices()) {
        assertEquals(STARTED, s.getServiceState());
      }
      aux.stop();
      for (Service s : aux.getServices()) {
        assertEquals(STOPPED, s.getServiceState());
      }
    }

    BranchVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Lifecycle plus metadata: verifies that both services are created and
     * started, that getMetaData() exposes one entry per service ("A" for
     * Asrv, "B" for Bsrv), and that everything transitions to STOPPED.
     */
    // NOTE(review): new String(ByteBuffer.array()) decodes with the platform
    // default charset; fine for the single-byte ASCII meta used here, but an
    // explicit charset would be safer — confirm before changing.
    @Test public void testAuxServicesMeta(){ Configuration conf=new Configuration(); conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,new String[]{"Asrv","Bsrv"}); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Asrv"),ServiceA.class,Service.class); conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT,"Bsrv"),ServiceB.class,Service.class); final AuxServices aux=new AuxServices(); aux.init(conf); int latch=1; for ( Service s : aux.getServices()) { assertEquals(INITED,s.getServiceState()); if (s instanceof ServiceA) { latch*=2; } else if (s instanceof ServiceB) { latch*=3; } else fail("Unexpected service type " + s.getClass()); } assertEquals("Invalid mix of services",6,latch); aux.start(); for ( Service s : aux.getServices()) { assertEquals(STARTED,s.getServiceState()); } Map meta=aux.getMetaData(); assertEquals(2,meta.size()); assertEquals("A",new String(meta.get("Asrv").array())); assertEquals("B",new String(meta.get("Bsrv").array())); aux.stop(); for ( Service s : aux.getServices()) { assertEquals(STOPPED,s.getServiceState()); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * With NM recovery enabled, AuxServices must create one state-store
     * directory per recoverable service under the configured recovery root.
     */
    @Test
    public void testAuxServiceRecoverySetup() throws IOException {
      Configuration conf = new YarnConfiguration();
      conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
      conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
      conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[]{"Asrv", "Bsrv"});
      conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
          RecoverableServiceA.class, Service.class);
      conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
          RecoverableServiceB.class, Service.class);
      try {
        final AuxServices aux = new AuxServices();
        aux.init(conf);
        Assert.assertEquals(2, aux.getServices().size());
        // One storage subdirectory is expected per recoverable service.
        File stateStoreRoot = new File(TEST_DIR, AuxServices.STATE_STORE_ROOT_NAME);
        Assert.assertEquals(2, stateStoreRoot.listFiles().length);
        aux.close();
      } finally {
        FileUtil.fullyDelete(TEST_DIR);
      }
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Launches a long-running container whose script records its pid in a
     * marker file, verifies the process is alive, then stops the container
     * and verifies the process is killed with exit status
     * KILLED_BY_APPMASTER.
     *
     * Fixes: removed a duplicated "Process is not alive!" assertion and
     * closed the BufferedReader (previously leaked).
     */
    @Test
    public void testContainerLaunchAndStop() throws IOException,
        InterruptedException, YarnException {
      containerManager.start();

      // The script writes a marker file containing its pid, then blocks
      // (~100s) so the container is still running when we stop it.
      File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
      PrintWriter fileWriter = new PrintWriter(scriptFile);
      File processStartFile =
          new File(tmpDir, "start_file.txt").getAbsoluteFile();
      ContainerId cId = createContainerId(0);
      if (Shell.WINDOWS) {
        fileWriter.println("@echo Hello World!> " + processStartFile);
        fileWriter.println("@echo " + cId + ">> " + processStartFile);
        fileWriter.println("@ping -n 100 127.0.0.1 >nul");
      } else {
        fileWriter.write("\numask 0");
        fileWriter.write("\necho Hello World! > " + processStartFile);
        fileWriter.write("\necho $$ >> " + processStartFile);
        fileWriter.write("\nexec sleep 100");
      }
      fileWriter.close();

      // Localize the script as the container's only resource.
      ContainerLaunchContext containerLaunchContext =
          recordFactory.newRecordInstance(ContainerLaunchContext.class);
      URL resource_alpha = ConverterUtils.getYarnUrlFromPath(
          localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
      LocalResource rsrc_alpha =
          recordFactory.newRecordInstance(LocalResource.class);
      rsrc_alpha.setResource(resource_alpha);
      rsrc_alpha.setSize(-1);
      rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
      rsrc_alpha.setType(LocalResourceType.FILE);
      rsrc_alpha.setTimestamp(scriptFile.lastModified());
      String destinationFile = "dest_file";
      Map localResources = new HashMap();
      localResources.put(destinationFile, rsrc_alpha);
      containerLaunchContext.setLocalResources(localResources);
      List commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
      containerLaunchContext.setCommands(commands);

      StartContainerRequest scRequest = StartContainerRequest.newInstance(
          containerLaunchContext,
          createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
              user, context.getContainerTokenSecretManager()));
      List list = new ArrayList();
      list.add(scRequest);
      StartContainersRequest allRequests =
          StartContainersRequest.newInstance(list);
      containerManager.startContainers(allRequests);

      // Wait (up to ~20s) for the script to create the marker file.
      int timeoutSecs = 0;
      while (!processStartFile.exists() && timeoutSecs++ < 20) {
        Thread.sleep(1000);
        LOG.info("Waiting for process start-file to be created");
      }
      Assert.assertTrue("ProcessStartFile doesn't exist!",
          processStartFile.exists());

      // Read the marker file: greeting line, pid line, then EOF.
      String pid;
      BufferedReader reader =
          new BufferedReader(new FileReader(processStartFile));
      try {
        Assert.assertEquals("Hello World!", reader.readLine());
        pid = reader.readLine().trim();
        Assert.assertEquals(null, reader.readLine());
      } finally {
        reader.close();
      }
      Assert.assertTrue("Process is not alive!",
          DefaultContainerExecutor.containerIsAlive(pid));

      // Stop the container and wait for it to complete.
      List containerIds = new ArrayList();
      containerIds.add(cId);
      StopContainersRequest stopRequest =
          StopContainersRequest.newInstance(containerIds);
      containerManager.stopContainers(stopRequest);
      BaseContainerManagerTest.waitForContainerState(containerManager, cId,
          ContainerState.COMPLETE);

      GetContainerStatusesRequest gcsRequest =
          GetContainerStatusesRequest.newInstance(containerIds);
      ContainerStatus containerStatus = containerManager
          .getContainerStatuses(gcsRequest).getContainerStatuses().get(0);
      int expectedExitCode = ContainerExitStatus.KILLED_BY_APPMASTER;
      Assert.assertEquals(expectedExitCode, containerStatus.getExitStatus());
      Assert.assertFalse("Process is still alive!",
          DefaultContainerExecutor.containerIsAlive(pid));
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Starts 10 containers in one batch where even-indexed requests carry
     * RM_INVALID_IDENTIFIER and odd-indexed ones carry the dummy RM id:
     * exactly the 5 odd containers must start successfully, and the 5 even
     * ones must fail with a "rejected as it is allocated by a previous RM"
     * message.
     */
    @Test public void testMultipleContainersLaunch() throws Exception { containerManager.start(); List list=new ArrayList(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); for (int i=0; i < 10; i++) { ContainerId cId=createContainerId(i); long identifier=0; if ((i & 1) == 0) identifier=ResourceManagerConstants.RM_INVALID_IDENTIFIER; else identifier=DUMMY_RM_IDENTIFIER; Token containerToken=createContainerToken(cId,identifier,context.getNodeId(),user,context.getContainerTokenSecretManager()); StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken); list.add(request); } StartContainersRequest requestList=StartContainersRequest.newInstance(list); StartContainersResponse response=containerManager.startContainers(requestList); Assert.assertEquals(5,response.getSuccessfullyStartedContainers().size()); for ( ContainerId id : response.getSuccessfullyStartedContainers()) { Assert.assertEquals(1,id.getId() & 1); } Assert.assertEquals(5,response.getFailedRequests().size()); for ( Map.Entry entry : response.getFailedRequests().entrySet()) { Assert.assertEquals(0,entry.getKey().getId() & 1); Assert.assertTrue(entry.getValue().getMessage().contains("Container " + entry.getKey() + " rejected as it is allocated by a previous RM")); } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Starts 10 containers — even-indexed ones under user "Fail", odd ones
     * under "Pass" — then verifies that both get-status and stop succeed for
     * the 5 odd containers and report a "Reject this container" failure for
     * the 5 even ones.
     */
    // NOTE(review): the local 'String user' deliberately shadows the test
    // class's user field so that even-indexed requests are rejected —
    // confirm against the authorization hook before renaming.
    @Test public void testMultipleContainersStopAndGetStatus() throws Exception { containerManager.start(); List startRequest=new ArrayList(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); List containerIds=new ArrayList(); for (int i=0; i < 10; i++) { ContainerId cId=createContainerId(i); String user=null; if ((i & 1) == 0) { user="Fail"; } else { user="Pass"; } Token containerToken=createContainerToken(cId,DUMMY_RM_IDENTIFIER,context.getNodeId(),user,context.getContainerTokenSecretManager()); StartContainerRequest request=StartContainerRequest.newInstance(containerLaunchContext,containerToken); startRequest.add(request); containerIds.add(cId); } StartContainersRequest requestList=StartContainersRequest.newInstance(startRequest); containerManager.startContainers(requestList); GetContainerStatusesRequest statusRequest=GetContainerStatusesRequest.newInstance(containerIds); GetContainerStatusesResponse statusResponse=containerManager.getContainerStatuses(statusRequest); Assert.assertEquals(5,statusResponse.getContainerStatuses().size()); for ( ContainerStatus status : statusResponse.getContainerStatuses()) { Assert.assertEquals(1,status.getContainerId().getId() & 1); } Assert.assertEquals(5,statusResponse.getFailedRequests().size()); for ( Map.Entry entry : statusResponse.getFailedRequests().entrySet()) { Assert.assertEquals(0,entry.getKey().getId() & 1); Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container")); } StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds); StopContainersResponse stopResponse=containerManager.stopContainers(stopRequest); Assert.assertEquals(5,stopResponse.getSuccessfullyStoppedContainers().size()); for ( ContainerId id : stopResponse.getSuccessfullyStoppedContainers()) { Assert.assertEquals(1,id.getId() & 1); } Assert.assertEquals(5,stopResponse.getFailedRequests().size()); for ( Map.Entry entry : 
stopResponse.getFailedRequests().entrySet()) { Assert.assertEquals(0,entry.getKey().getId() & 1); Assert.assertTrue(entry.getValue().getMessage().contains("Reject this container")); } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Localizes a single file into a container and verifies the expected NM
     * directory layout (usercache, appcache, nm-private and per-container
     * directories) plus the localized file's contents.
     *
     * Fix: the BufferedReader over the localized file is now closed
     * (previously leaked).
     */
    @Test
    public void testContainerSetup() throws Exception {
      containerManager.start();

      // Create the source file that will be localized into the container.
      File dir = new File(tmpDir, "dir");
      dir.mkdirs();
      File file = new File(dir, "file");
      PrintWriter fileWriter = new PrintWriter(file);
      fileWriter.write("Hello World!");
      fileWriter.close();

      ContainerId cId = createContainerId(0);
      ContainerLaunchContext containerLaunchContext =
          recordFactory.newRecordInstance(ContainerLaunchContext.class);
      URL resource_alpha = ConverterUtils.getYarnUrlFromPath(
          localFS.makeQualified(new Path(file.getAbsolutePath())));
      LocalResource rsrc_alpha =
          recordFactory.newRecordInstance(LocalResource.class);
      rsrc_alpha.setResource(resource_alpha);
      rsrc_alpha.setSize(-1);
      rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
      rsrc_alpha.setType(LocalResourceType.FILE);
      rsrc_alpha.setTimestamp(file.lastModified());
      String destinationFile = "dest_file";
      Map localResources = new HashMap();
      localResources.put(destinationFile, rsrc_alpha);
      containerLaunchContext.setLocalResources(localResources);

      StartContainerRequest scRequest = StartContainerRequest.newInstance(
          containerLaunchContext,
          createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
              user, context.getContainerTokenSecretManager()));
      List list = new ArrayList();
      list.add(scRequest);
      StartContainersRequest allRequests =
          StartContainersRequest.newInstance(list);
      containerManager.startContainers(allRequests);
      BaseContainerManagerTest.waitForContainerState(containerManager, cId,
          ContainerState.COMPLETE);

      // Expected directory layout under the NM local dir.
      ApplicationId appId = cId.getApplicationAttemptId().getApplicationId();
      String appIDStr = ConverterUtils.toString(appId);
      String containerIDStr = ConverterUtils.toString(cId);
      File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE);
      File userDir = new File(userCacheDir, user);
      File appCache = new File(userDir, ContainerLocalizer.APPCACHE);
      File appDir = new File(appCache, appIDStr);
      File containerDir = new File(appDir, containerIDStr);
      File targetFile = new File(containerDir, destinationFile);
      File sysDir =
          new File(localDir, ResourceLocalizationService.NM_PRIVATE_DIR);
      File appSysDir = new File(sysDir, appIDStr);
      File containerSysDir = new File(appSysDir, containerIDStr);
      for (File f : new File[]{localDir, sysDir, userCacheDir, appDir,
          appSysDir, containerDir, containerSysDir}) {
        Assert.assertTrue(f.getAbsolutePath() + " doesn't exist!!", f.exists());
        Assert.assertTrue(f.getAbsolutePath() + " is not a directory!!",
            f.isDirectory());
      }
      Assert.assertTrue(targetFile.getAbsolutePath() + " doesn't exist!!",
          targetFile.exists());

      // The localized copy must contain exactly the original contents.
      BufferedReader reader = new BufferedReader(new FileReader(targetFile));
      try {
        Assert.assertEquals("Hello World!", reader.readLine());
        Assert.assertEquals(null, reader.readLine());
      } finally {
        reader.close();
      }
    }

    APIUtilityVerifier BranchVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks that the NM context advertises the canonical hostname (when
     * reverse DNS resolves one) and that querying the status of a container
     * that was never started surfaces an error.
     */
    @Test
    public void testContainerManagerInitialization() throws IOException {
      containerManager.start();

      InetAddress localAddr = InetAddress.getLocalHost();
      String fqdn = localAddr.getCanonicalHostName();
      if (!localAddr.getHostAddress().equals(fqdn)) {
        // Only meaningful when reverse DNS yields a real host name.
        Assert.assertEquals(fqdn, context.getNodeId().getHost());
      }

      boolean throwsException = false;
      try {
        List containerIds = new ArrayList();
        ContainerId id = createContainerId(0);
        containerIds.add(id);
        GetContainerStatusesRequest request =
            GetContainerStatusesRequest.newInstance(containerIds);
        GetContainerStatusesResponse response =
            containerManager.getContainerStatuses(request);
        if (response.getFailedRequests().containsKey(id)) {
          throw response.getFailedRequests().get(id).deSerialize();
        }
      } catch (Throwable e) {
        // Expected: the unknown container must be reported as failed.
        throwsException = true;
      }
      Assert.assertTrue(throwsException);
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManagerRecovery

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * NM application recovery against a shared NMMemoryStateStoreService:
     * starts a container, then restarts the ContainerManager three times and
     * checks after each restart that the application, its state (INITING,
     * then APPLICATION_RESOURCES_CLEANINGUP) and its view/modify ACLs are
     * recovered intact. After APPLICATION_RESOURCES_CLEANEDUP and
     * APPLICATION_LOG_HANDLING_FINISHED the app reaches FINISHED and must no
     * longer be recovered on the final restart.
     */
    // NOTE(review): new Integer(123).byteValue() relies on the deprecated
    // Integer constructor; (byte) 123 would be equivalent.
    @Test public void testApplicationRecovery() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true); conf.set(YarnConfiguration.NM_ADDRESS,"localhost:1234"); conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE,true); conf.set(YarnConfiguration.YARN_ADMIN_ACL,"yarn_admin_user"); NMStateStoreService stateStore=new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); Context context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); ContainerManagerImpl cm=createContainerManager(context); cm.init(conf); cm.start(); MasterKey masterKey=new MasterKeyPBImpl(); masterKey.setKeyId(123); masterKey.setBytes(ByteBuffer.wrap(new byte[]{new Integer(123).byteValue()})); context.getContainerTokenSecretManager().setMasterKey(masterKey); context.getNMTokenSecretManager().setMasterKey(masterKey); String appUser="app_user1"; String modUser="modify_user1"; String viewUser="view_user1"; String enemyUser="enemy_user"; ApplicationId appId=ApplicationId.newInstance(0,1); ApplicationAttemptId attemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId cid=ContainerId.newInstance(attemptId,1); Map localResources=Collections.emptyMap(); Map containerEnv=Collections.emptyMap(); List containerCmds=Collections.emptyList(); Map serviceData=Collections.emptyMap(); Credentials containerCreds=new Credentials(); DataOutputBuffer dob=new DataOutputBuffer(); containerCreds.writeTokenStorageToStream(dob); ByteBuffer containerTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength()); Map acls=new HashMap(); acls.put(ApplicationAccessType.MODIFY_APP,modUser); acls.put(ApplicationAccessType.VIEW_APP,viewUser); ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,containerEnv,containerCmds,serviceData,containerTokens,acls); StartContainersResponse startResponse=startContainer(context,cm,cid,clc); 
assertTrue(startResponse.getFailedRequests().isEmpty()); assertEquals(1,context.getApplications().size()); Application app=context.getApplications().get(appId); assertNotNull(app); waitForAppState(app,ApplicationState.INITING); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId)); cm.stop(); context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); cm=createContainerManager(context); cm.init(conf); cm.start(); assertEquals(1,context.getApplications().size()); app=context.getApplications().get(appId); assertNotNull(app); waitForAppState(app,ApplicationState.INITING); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId)); List finishedApps=new ArrayList(); finishedApps.add(appId); cm.handle(new 
CMgrCompletedAppsEvent(finishedApps,CMgrCompletedAppsEvent.Reason.BY_RESOURCEMANAGER)); waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); cm.stop(); context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); cm=createContainerManager(context); cm.init(conf); cm.start(); assertEquals(1,context.getApplications().size()); app=context.getApplications().get(appId); assertNotNull(app); waitForAppState(app,ApplicationState.APPLICATION_RESOURCES_CLEANINGUP); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(modUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.MODIFY_APP,appUser,appId)); assertTrue(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(viewUser),ApplicationAccessType.VIEW_APP,appUser,appId)); assertFalse(context.getApplicationACLsManager().checkAccess(UserGroupInformation.createRemoteUser(enemyUser),ApplicationAccessType.VIEW_APP,appUser,appId)); app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_RESOURCES_CLEANEDUP)); assertEquals(app.getApplicationState(),ApplicationState.FINISHED); app.handle(new ApplicationEvent(app.getAppId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)); cm.stop(); context=new NMContext(new NMContainerTokenSecretManager(conf),new NMTokenSecretManagerInNM(),null,new ApplicationACLsManager(conf),stateStore); cm=createContainerManager(context); cm.init(conf); cm.start(); assertTrue(context.getApplications().isEmpty()); cm.stop(); }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.application.TestApplication

    InternalCallVerifier EqualityVerifier 
    /**
     * All container start events arrive before the application is running:
     * containers accumulate while the app is INITING, and once the app
     * transitions to RUNNING every tracked container must have received its
     * INIT event on the container bus.
     */
    @Test public void testApplicationInit1(){ WrappedApplication wa=null; try { wa=new WrappedApplication(1,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(1); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); assertEquals(1,wa.app.getContainers().size()); wa.initContainer(0); wa.initContainer(2); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); assertEquals(3,wa.app.getContainers().size()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); for (int i=0; i < wa.containers.size(); i++) { verify(wa.containerBus).handle(argThat(new ContainerInitMatcher(wa.containers.get(i).getContainerId()))); } } finally { if (wa != null) wa.finished(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * The NM token secret manager must be notified exactly once when the
     * application reaches FINISHED.
     */
    @Test
    public void testNMTokenSecretManagerCleanup() {
      WrappedApplication wa = null;
      try {
        wa = new WrappedApplication(1, 314159265358979L, "yak", 1);
        wa.initApplication();
        wa.initContainer(0);
        assertEquals(ApplicationState.INITING, wa.app.getApplicationState());
        assertEquals(1, wa.app.getContainers().size());
        // Drive the application all the way to FINISHED.
        wa.appFinished();
        wa.containerFinished(0);
        wa.appResourcesCleanedup();
        assertEquals(ApplicationState.FINISHED, wa.app.getApplicationState());
        verify(wa.nmTokenSecretMgr).appFinished(eq(wa.appId));
      } finally {
        if (wa != null) {
          wa.finished();
        }
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Container start events arriving after the application is already
     * RUNNING: the first container is dispatched when the app transitions to
     * RUNNING, and later containers receive their INIT events immediately on
     * arrival.
     */
    @Test public void testApplicationInit2(){ WrappedApplication wa=null; try { wa=new WrappedApplication(2,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(0); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); assertEquals(1,wa.app.getContainers().size()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); verify(wa.containerBus).handle(argThat(new ContainerInitMatcher(wa.containers.get(0).getContainerId()))); wa.initContainer(1); wa.initContainer(2); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(3,wa.app.getContainers().size()); for (int i=1; i < wa.containers.size(); i++) { verify(wa.containerBus).handle(argThat(new ContainerInitMatcher(wa.containers.get(i).getContainerId()))); } } finally { if (wa != null) wa.finished(); } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * APP_FINISHED arriving after all containers have already completed:
     * the app must move straight to APPLICATION_RESOURCES_CLEANINGUP,
     * request DESTROY_APPLICATION_RESOURCES on the localizer bus, and end in
     * FINISHED; once container tokens expire, their start requests become
     * valid for reuse again.
     */
    @Test @SuppressWarnings("unchecked") public void testAppFinishedOnCompletedContainers(){ WrappedApplication wa=null; try { wa=new WrappedApplication(5,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); reset(wa.localizerBus); wa.containerFinished(0); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(0,wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wa.app.getApplicationState()); verify(wa.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wa.app))); wa.appResourcesCleanedup(); for ( Container container : wa.containers) { ContainerTokenIdentifier identifier=wa.getContainerTokenIdentifier(container.getContainerId()); waitForContainerTokenToExpire(identifier); Assert.assertTrue(wa.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier)); } assertEquals(ApplicationState.FINISHED,wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * APP_FINISHED arriving while containers are still running: the app
     * waits in FINISHING_CONTAINERS_WAIT and sends KILL to each remaining
     * container; when the last one finishes it moves to
     * APPLICATION_RESOURCES_CLEANINGUP, fires DESTROY_APPLICATION_RESOURCES
     * and APPLICATION_STOP, and finally reaches FINISHED with expired
     * container tokens valid for reuse.
     */
    @Test @SuppressWarnings("unchecked") public void testAppFinishedOnRunningContainers(){ WrappedApplication wa=null; try { wa=new WrappedApplication(4,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(2,wa.app.getContainers().size()); wa.appFinished(); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wa.app.getApplicationState()); assertEquals(2,wa.app.getContainers().size()); for (int i=1; i < wa.containers.size(); i++) { verify(wa.containerBus).handle(argThat(new ContainerKillMatcher(wa.containers.get(i).getContainerId()))); } wa.containerFinished(1); assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,wa.app.getApplicationState()); assertEquals(1,wa.app.getContainers().size()); reset(wa.localizerBus); wa.containerFinished(2); assertEquals(ApplicationState.APPLICATION_RESOURCES_CLEANINGUP,wa.app.getApplicationState()); assertEquals(0,wa.app.getContainers().size()); verify(wa.localizerBus).handle(refEq(new ApplicationLocalizationEvent(LocalizationEventType.DESTROY_APPLICATION_RESOURCES,wa.app))); verify(wa.auxBus).handle(refEq(new AuxServicesEvent(AuxServicesEventType.APPLICATION_STOP,wa.appId))); wa.appResourcesCleanedup(); for ( Container container : wa.containers) { ContainerTokenIdentifier identifier=wa.getContainerTokenIdentifier(container.getContainerId()); waitForContainerTokenToExpire(identifier); Assert.assertTrue(wa.context.getContainerTokenSecretManager().isValidStartContainerRequest(identifier)); } assertEquals(ApplicationState.FINISHED,wa.app.getApplicationState()); } finally { if (wa != null) wa.finished(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Finished containers are properly tracked when one of several
     * containers finishes while the application is still APP_INITING: the
     * finished container is removed from tracking, and the remaining ones
     * carry over into RUNNING and can finish normally.
     */
    @Test public void testContainersCompleteDuringAppInit2(){ WrappedApplication wa=null; try { wa=new WrappedApplication(3,314159265358979L,"yak",3); wa.initApplication(); wa.initContainer(-1); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); wa.containerFinished(0); assertEquals(ApplicationState.INITING,wa.app.getApplicationState()); wa.applicationInited(); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(2,wa.app.getContainers().size()); wa.containerFinished(1); wa.containerFinished(2); assertEquals(ApplicationState.RUNNING,wa.app.getApplicationState()); assertEquals(0,wa.app.getContainers().size()); } finally { if (wa != null) wa.finished(); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Finished containers are properly tracked when the only container
     * finishes while the application is still APP_INITING: the app still
     * reaches RUNNING with zero tracked containers.
     */
    @Test
    public void testContainersCompleteDuringAppInit1() {
      WrappedApplication wa = null;
      try {
        wa = new WrappedApplication(3, 314159265358979L, "yak", 1);
        wa.initApplication();
        wa.initContainer(-1);
        assertEquals(ApplicationState.INITING, wa.app.getApplicationState());
        // The lone container completes before the app leaves INITING.
        wa.containerFinished(0);
        assertEquals(ApplicationState.INITING, wa.app.getApplicationState());
        wa.applicationInited();
        assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState());
        assertEquals(0, wa.app.getContainers().size());
      } finally {
        if (wa != null) {
          wa.finished();
        }
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * The application stays RUNNING after all of its containers complete,
     * until the RM sends APP_FINISHED.
     */
    @Test
    public void testAppRunningAfterContainersComplete() {
      WrappedApplication wrapped = null;
      try {
        wrapped = new WrappedApplication(3, 314159265358979L, "yak", 3);
        wrapped.initApplication();
        wrapped.initContainer(-1);
        assertEquals(ApplicationState.INITING, wrapped.app.getApplicationState());
        wrapped.applicationInited();
        assertEquals(ApplicationState.RUNNING, wrapped.app.getApplicationState());
        wrapped.containerFinished(0);
        assertEquals(ApplicationState.RUNNING, wrapped.app.getApplicationState());
        assertEquals(2, wrapped.app.getContainers().size());
        wrapped.containerFinished(1);
        wrapped.containerFinished(2);
        // No APP_FINISHED yet, so the app must remain RUNNING with no containers.
        assertEquals(ApplicationState.RUNNING, wrapped.app.getApplicationState());
        assertEquals(0, wrapped.app.getContainers().size());
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.container.TestContainer

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** An INIT event arriving after the container is DONE must be a no-op. */
    @Test
    @SuppressWarnings("unchecked")
    public void testInitWhileDone() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(6, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.launchContainer();
        reset(wrapped.localizerBus);
        wrapped.containerSuccessful();
        wrapped.containerResourcesCleanup();
        assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        // A stray INIT must leave the finished container untouched.
        wrapped.initContainer();
        assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Late RESOURCE_LOCALIZED events arriving after localization has already
     * failed must not move the container out of LOCALIZATION_FAILED.
     */
    @Test
    public void testResourceLocalizedOnLocalizationFailed() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(16, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        // Fail roughly half of the resources, but at least one.
        int failCount = wrapped.getLocalResourceCount() / 2;
        if (failCount == 0) {
          failCount = 1;
        }
        wrapped.failLocalizeResources(failCount);
        assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        wrapped.localizeResourcesFromInvalidState(failCount);
        assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
        Assert.assertTrue(wrapped.getDiagnostics().contains(FAKE_LOCALIZATION_ERROR));
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * KILL arriving after the launcher has already run (and the container has
     * exited with failure) must not change the container state.
     */
    @Test
    public void testKillOnLocalizedWhenContainerLaunched() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(17, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        assertEquals(ContainerState.LOCALIZED, wrapped.c.getContainerState());
        ContainerLaunch launcher = wrapped.launcher.running.get(wrapped.c.getContainerId());
        // Run the launcher before the kill arrives.
        launcher.call();
        wrapped.drainDispatcherEvents();
        assertEquals(ContainerState.EXITED_WITH_FAILURE, wrapped.c.getContainerState());
        wrapped.killContainer();
        assertEquals(ContainerState.EXITED_WITH_FAILURE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * KILL arriving while LOCALIZED but before the launcher runs: the
     * container moves through KILLING to CONTAINER_CLEANEDUP_AFTER_KILL and
     * the running-containers metric drops back to zero.
     */
    @Test
    public void testKillOnLocalizedWhenContainerNotLaunched() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(17, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        assertEquals(ContainerState.LOCALIZED, wrapped.c.getContainerState());
        ContainerLaunch launcher = wrapped.launcher.running.get(wrapped.c.getContainerId());
        // Kill first, then let the launcher run.
        wrapped.killContainer();
        assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
        launcher.call();
        wrapped.drainDispatcherEvents();
        assertEquals(ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL,
            wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
        wrapped.c.handle(new ContainerEvent(wrapped.c.getContainerId(),
            ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
        assertEquals(0, metrics.getRunningContainers());
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** Resources are cleaned up after an explicit kill request. */
    @Test
    @SuppressWarnings("unchecked")
    public void testCleanupOnKillRequest() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(12, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.launchContainer();
        reset(wrapped.localizerBus);
        wrapped.killContainer();
        assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        wrapped.containerKilledOnRequest();
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Verify the correct resource-request events are sent to the localizer. */
    @Test
    public void testLocalizationRequest() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(7, 314159265358979L, 4344, "yak");
        assertEquals(ContainerState.NEW, wrapped.c.getContainerState());
        wrapped.initContainer();
        // INIT must trigger requests for all three resource visibilities.
        ResourcesRequestedMatcher expectedRequest = new ResourcesRequestedMatcher(
            wrapped.localResources,
            EnumSet.of(LocalResourceVisibility.PUBLIC,
                LocalResourceVisibility.PRIVATE,
                LocalResourceVisibility.APPLICATION));
        verify(wrapped.localizerBus).handle(argThat(expectedRequest));
        assertEquals(ContainerState.LOCALIZING, wrapped.c.getContainerState());
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A second RESOURCE_FAILED arriving after localization has already failed
     * must leave the container in LOCALIZATION_FAILED.
     */
    @Test
    public void testResourceFailedOnLocalizationFailed() throws Exception {
      WrappedContainer wc = null;
      try {
        wc = new WrappedContainer(16, 314159265358979L, 4344, "yak");
        wc.initContainer();
        // FIX: a raw Iterator cannot yield String without a cast; the type
        // parameter was evidently lost in extraction — confirm upstream.
        Iterator<String> lRsrcKeys = wc.localResources.keySet().iterator();
        String key1 = lRsrcKeys.next();
        String key2 = lRsrcKeys.next();
        wc.failLocalizeSpecificResource(key1);
        assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState());
        assertNull(wc.c.getLocalizedResources());
        wc.failLocalizeSpecificResource(key2);
        assertEquals(ContainerState.LOCALIZATION_FAILED, wc.c.getContainerState());
        assertNull(wc.c.getLocalizedResources());
        verifyCleanupCall(wc);
      } finally {
        if (wc != null) {
          wc.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** Resources are cleaned up after the container exits successfully. */
    @Test
    @SuppressWarnings("unchecked")
    public void testCleanupOnSuccess() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(11, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.launchContainer();
        reset(wrapped.localizerBus);
        wrapped.containerSuccessful();
        assertEquals(ContainerState.EXITED_WITH_SUCCESS, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * KILL during LOCALIZING moves the container to KILLING and records the
     * RM exit status plus a diagnostic mentioning the kill request.
     */
    @Test
    public void testKillOnLocalizing() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(14, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        assertEquals(ContainerState.LOCALIZING, wrapped.c.getContainerState());
        wrapped.killContainer();
        assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
        assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
            wrapped.c.cloneAndGetContainerStatus().getExitStatus());
        assertTrue(wrapped.c.cloneAndGetContainerStatus().getDiagnostics()
            .contains("KillRequest"));
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** KILL after localization failed must not change the container state. */
    @Test
    public void testKillOnLocalizationFailed() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(15, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.failLocalizeResources(wrapped.getLocalResourceCount());
        assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        wrapped.killContainer();
        assertEquals(ContainerState.LOCALIZATION_FAILED, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** A LAUNCH event arriving after a kill request must be ignored. */
    @Test
    public void testLaunchAfterKillRequest() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(14, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.killContainer();
        assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        // The late launch must not move the container out of KILLING.
        wrapped.launchContainer();
        assertEquals(ContainerState.KILLING, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        wrapped.containerKilledOnRequest();
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * RESOURCE_FAILED arriving while the container is already KILLING must
     * leave it in KILLING.
     */
    @Test
    public void testResourceFailedOnKilling() throws Exception {
      WrappedContainer wc = null;
      try {
        wc = new WrappedContainer(16, 314159265358979L, 4344, "yak");
        wc.initContainer();
        // FIX: a raw Iterator cannot yield String without a cast; the type
        // parameter was evidently lost in extraction — confirm upstream.
        Iterator<String> lRsrcKeys = wc.localResources.keySet().iterator();
        String key1 = lRsrcKeys.next();
        wc.killContainer();
        assertEquals(ContainerState.KILLING, wc.c.getContainerState());
        assertNull(wc.c.getLocalizedResources());
        wc.failLocalizeSpecificResource(key1);
        assertEquals(ContainerState.KILLING, wc.c.getContainerState());
        assertNull(wc.c.getLocalizedResources());
        verifyCleanupCall(wc);
      } finally {
        if (wc != null) {
          wc.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** Resources are cleaned up after the container exits with failure. */
    @Test
    @SuppressWarnings("unchecked")
    public void testCleanupOnFailure() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(10, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.launchContainer();
        reset(wrapped.localizerBus);
        wrapped.containerFailed(ExitCode.FORCE_KILLED.getExitCode());
        assertEquals(ContainerState.EXITED_WITH_FAILURE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** Externally-killed containers end in EXITED_WITH_FAILURE and clean up. */
    @Test
    @SuppressWarnings("unchecked")
    public void testExternalKill() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(13, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.launchContainer();
        reset(wrapped.localizerBus);
        wrapped.containerKilledOnRequest();
        assertEquals(ContainerState.EXITED_WITH_FAILURE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** A localization failure arriving at DONE must be ignored. */
    @Test
    @SuppressWarnings("unchecked")
    public void testLocalizationFailureAtDone() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(6, 314159265358979L, 4344, "yak");
        wrapped.initContainer();
        wrapped.localizeResources();
        wrapped.launchContainer();
        reset(wrapped.localizerBus);
        wrapped.containerSuccessful();
        wrapped.containerResourcesCleanup();
        assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        // Stray resource failure after DONE must not change anything.
        wrapped.resourceFailedContainer();
        assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
        assertNull(wrapped.c.getLocalizedResources());
        verifyCleanupCall(wrapped);
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * KILL on a NEW container goes straight to DONE with the RM exit status
     * and a diagnostic mentioning the kill request.
     */
    @Test
    public void testKillOnNew() throws Exception {
      WrappedContainer wrapped = null;
      try {
        wrapped = new WrappedContainer(13, 314159265358979L, 4344, "yak");
        assertEquals(ContainerState.NEW, wrapped.c.getContainerState());
        wrapped.killContainer();
        assertEquals(ContainerState.DONE, wrapped.c.getContainerState());
        assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,
            wrapped.c.cloneAndGetContainerStatus().getExitStatus());
        assertTrue(wrapped.c.cloneAndGetContainerStatus().getDiagnostics()
            .contains("KillRequest"));
      } finally {
        if (wrapped != null) {
          wrapped.finished();
        }
      }
    }

    EqualityVerifier 
    // NOTE(review): extraction appears to have stripped generic type
    // parameters here (raw Map.Entry and raw ArgumentMatcher anonymous
    // classes); confirm against the original source before compiling.
    // The test checks that (a) each serviceData entry is forwarded to the
    // aux-services bus with matching ID and payload, and (b) a
    // LAUNCH_CONTAINER event for this container reaches the launcher bus.
    /** * Verify serviceData correctly sent. */ @Test public void testServiceData() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(9,314159265358979L,4344,"yak",false,true); assertEquals(ContainerState.NEW,wc.c.getContainerState()); wc.initContainer(); for ( final Map.Entry e : wc.serviceData.entrySet()) { ArgumentMatcher matchesServiceReq=new ArgumentMatcher(){ @Override public boolean matches( Object o){ AuxServicesEvent evt=(AuxServicesEvent)o; return e.getKey().equals(evt.getServiceID()) && 0 == e.getValue().compareTo(evt.getServiceData()); } } ; verify(wc.auxBus).handle(argThat(matchesServiceReq)); } final WrappedContainer wcf=wc; ArgumentMatcher matchesLaunchReq=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ContainersLauncherEvent evt=(ContainersLauncherEvent)o; return evt.getType() == ContainersLauncherEventType.LAUNCH_CONTAINER && wcf.cId == evt.getContainer().getContainerId(); } } ; verify(wc.launcherBus).handle(argThat(matchesLaunchReq)); } finally { if (wc != null) { wc.finished(); } } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): generic type parameters were garbled in extraction
    // ("Map>" / "Entry>" are not valid Java); confirm the real parameter
    // types against the original source before compiling.
    // The test localizes resources, checks the localized paths reported by
    // the container exactly match those returned by localizeResources(),
    // then verifies a launch event for this container reaches the launcher.
    /** * Verify container launch when all resources already cached. */ @Test public void testLocalizationLaunch() throws Exception { WrappedContainer wc=null; try { wc=new WrappedContainer(8,314159265358979L,4344,"yak"); assertEquals(ContainerState.NEW,wc.c.getContainerState()); wc.initContainer(); Map> localPaths=wc.localizeResources(); assertEquals(ContainerState.LOCALIZED,wc.c.getContainerState()); assertNotNull(wc.c.getLocalizedResources()); for ( Entry> loc : wc.c.getLocalizedResources().entrySet()) { assertEquals(localPaths.remove(loc.getKey()),loc.getValue()); } assertTrue(localPaths.isEmpty()); final WrappedContainer wcf=wc; ArgumentMatcher matchesContainerLaunch=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ContainersLauncherEvent launchEvent=(ContainersLauncherEvent)o; return wcf.c == launchEvent.getContainer(); } } ; verify(wc.launcherBus).handle(argThat(matchesContainerLaunch)); } finally { if (wc != null) { wc.finished(); } } }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testSpecialCharSymlinks() throws IOException { File shellFile=null; File tempFile=null; String badSymlink=Shell.WINDOWS ? "foo@zz_#!-+bar.cmd" : "foo@zz%_#*&!-+= bar()"; File symLinkFile=null; try { shellFile=Shell.appendScriptExtension(tmpDir,"hello"); tempFile=Shell.appendScriptExtension(tmpDir,"temp"); String timeoutCommand=Shell.WINDOWS ? "@echo \"hello\"" : "echo \"hello\""; PrintWriter writer=new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile,true); writer.println(timeoutCommand); writer.close(); Map> resources=new HashMap>(); Path path=new Path(shellFile.getAbsolutePath()); resources.put(path,Arrays.asList(badSymlink)); FileOutputStream fos=new FileOutputStream(tempFile); Map env=new HashMap(); List commands=new ArrayList(); if (Shell.WINDOWS) { commands.add("cmd"); commands.add("/c"); commands.add("\"" + badSymlink + "\""); } else { commands.add("/bin/sh ./\\\"" + badSymlink + "\\\""); } ContainerLaunch.writeLaunchEnv(fos,env,resources,commands); fos.flush(); fos.close(); FileUtil.setExecutable(tempFile,true); Shell.ShellCommandExecutor shexc=new Shell.ShellCommandExecutor(new String[]{tempFile.getAbsolutePath()},tmpDir); shexc.execute(); assertEquals(shexc.getExitCode(),0); assert (shexc.getOutput().contains("hello")); symLinkFile=new File(tmpDir,badSymlink); } finally { if (shellFile != null && shellFile.exists()) { shellFile.delete(); } if (tempFile != null && tempFile.exists()) { tempFile.delete(); } if (symLinkFile != null && symLinkFile.exists()) { symLinkFile.delete(); } } }

    UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
    /**
     * Windows-only: mkdir commands at the shell length limit succeed, one
     * character over the limit throws.
     */
    @Test(timeout = 10000)
    public void testWindowsShellScriptBuilderMkdir() throws IOException {
      String mkDirCmd = "@if not exist \"\" mkdir \"\"";
      Assume.assumeTrue(Shell.WINDOWS);
      assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);
      ShellScriptBuilder builder = ShellScriptBuilder.create();
      // Comfortably under the limit.
      builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024)));
      // Exactly at the limit.
      builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat(
          "E", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length()) / 2)));
      try {
        // One character over must fail.
        builder.mkdir(new Path(org.apache.commons.lang.StringUtils.repeat(
            "X", (Shell.WINDOWS_MAX_SHELL_LENGHT - mkDirCmd.length()) / 2 + 1)));
        fail("long mkdir was expected to throw");
      } catch (IOException e) {
        assertThat(e.getMessage(), containsString(expectedMessage));
      }
    }

    UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
    /**
     * Windows-only: symlink commands at the shell length limit succeed, one
     * character over the limit throws.
     */
    @Test(timeout = 10000)
    public void testWindowsShellScriptBuilderLink() throws IOException {
      Assume.assumeTrue(Shell.WINDOWS);
      String linkCmd = "@" + Shell.WINUTILS + " symlink \"\" \"\"";
      assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);
      ShellScriptBuilder builder = ShellScriptBuilder.create();
      // Comfortably under the limit.
      builder.link(new Path(org.apache.commons.lang.StringUtils.repeat("A", 1024)),
          new Path(org.apache.commons.lang.StringUtils.repeat("B", 1024)));
      // Exactly at the limit.
      builder.link(
          new Path(org.apache.commons.lang.StringUtils.repeat(
              "E", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2)),
          new Path(org.apache.commons.lang.StringUtils.repeat(
              "F", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2)));
      try {
        // FIX: the original appended "+ 1" to the repeated *string* for the
        // Y path (adding the character '1') instead of increasing the repeat
        // count, unlike the X path. Both forms exceed the limit by one char,
        // but the repeat-count form matches the X path and the intent.
        builder.link(
            new Path(org.apache.commons.lang.StringUtils.repeat(
                "X", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2 + 1)),
            new Path(org.apache.commons.lang.StringUtils.repeat(
                "Y", (Shell.WINDOWS_MAX_SHELL_LENGHT - linkCmd.length()) / 2 + 1)));
        fail("long link was expected to throw");
      } catch (IOException e) {
        assertThat(e.getMessage(), containsString(expectedMessage));
      }
    }

    UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
    /**
     * Windows-only: single- and multi-argument commands at the shell length
     * limit succeed, one character over the limit throws.
     */
    @Test(timeout = 10000)
    public void testWindowsShellScriptBuilderCommand() throws IOException {
      String callCmd = "@call ";
      Assume.assumeTrue(Shell.WINDOWS);
      assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);
      ShellScriptBuilder builder = ShellScriptBuilder.create();
      // Single argument: under, at, and just over the limit.
      builder.command(Arrays.asList(
          org.apache.commons.lang.StringUtils.repeat("A", 1024)));
      builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat(
          "E", Shell.WINDOWS_MAX_SHELL_LENGHT - callCmd.length())));
      try {
        builder.command(Arrays.asList(org.apache.commons.lang.StringUtils.repeat(
            "X", Shell.WINDOWS_MAX_SHELL_LENGHT - callCmd.length() + 1)));
        fail("longCommand was expected to throw");
      } catch (IOException e) {
        assertThat(e.getMessage(), containsString(expectedMessage));
      }
      // Multiple arguments: under, at, and just over the limit.
      builder.command(Arrays.asList(
          org.apache.commons.lang.StringUtils.repeat("A", 1024),
          org.apache.commons.lang.StringUtils.repeat("A", 1024),
          org.apache.commons.lang.StringUtils.repeat("A", 1024)));
      builder.command(Arrays.asList(
          org.apache.commons.lang.StringUtils.repeat("E", 4095),
          org.apache.commons.lang.StringUtils.repeat("E", 2047),
          org.apache.commons.lang.StringUtils.repeat("E", 2047 - callCmd.length())));
      try {
        builder.command(Arrays.asList(
            org.apache.commons.lang.StringUtils.repeat("X", 4095),
            org.apache.commons.lang.StringUtils.repeat("X", 2047),
            org.apache.commons.lang.StringUtils.repeat("X", 2048 - callCmd.length())));
        fail("long commands was expected to throw");
      } catch (IOException e) {
        assertThat(e.getMessage(), containsString(expectedMessage));
      }
    }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * ContainerLaunch.call() on a container whose localized resources are
     * null must emit a CONTAINER_EXITED_WITH_FAILURE exit event.
     */
    @SuppressWarnings("rawtypes")
    @Test(timeout = 10000)
    public void testCallFailureWithNullLocalizedResources() {
      Container container = mock(Container.class);
      when(container.getContainerId()).thenReturn(ContainerId.newInstance(
          ApplicationAttemptId.newInstance(
              ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1));
      ContainerLaunchContext clc = mock(ContainerLaunchContext.class);
      when(clc.getCommands()).thenReturn(Collections.emptyList());
      when(container.getLaunchContext()).thenReturn(clc);
      // Null localized resources is the failure condition under test.
      when(container.getLocalizedResources()).thenReturn(null);
      Dispatcher dispatcher = mock(Dispatcher.class);
      EventHandler eventHandler = new EventHandler() {
        public void handle(Event event) {
          Assert.assertTrue(event instanceof ContainerExitEvent);
          ContainerExitEvent exitEvent = (ContainerExitEvent) event;
          Assert.assertEquals(ContainerEventType.CONTAINER_EXITED_WITH_FAILURE,
              exitEvent.getType());
        }
      };
      when(dispatcher.getEventHandler()).thenReturn(eventHandler);
      ContainerLaunch launch = new ContainerLaunch(context, new Configuration(),
          dispatcher, exec, null, container, dirsHandler, containerManager);
      launch.call();
    }

    EqualityVerifier 
    /**
     * Service data round-trips through the environment map via
     * AuxiliaryServiceHelper set/get.
     */
    @Test(timeout = 5000)
    public void testAuxiliaryServiceHelper() throws Exception {
      // FIX: raw Map/HashMap — type parameters were evidently stripped in
      // extraction; reconstructed as Map<String, String> (the env map) —
      // confirm against upstream source.
      Map<String, String> env = new HashMap<String, String>();
      String serviceName = "testAuxiliaryService";
      ByteBuffer bb = ByteBuffer.wrap("testAuxiliaryService".getBytes());
      AuxiliaryServiceHelper.setServiceDataIntoEnv(serviceName, bb, env);
      Assert.assertEquals(bb,
          AuxiliaryServiceHelper.getServiceDataFromEnv(serviceName, env));
    }

    UtilityVerifier AssumptionSetter EqualityVerifier ConditionMatcher HybridVerifier 
    /**
     * Windows-only: environment values at the shell length limit succeed,
     * one character over the limit throws.
     */
    @Test(timeout = 10000)
    public void testWindowsShellScriptBuilderEnv() throws IOException {
      Assume.assumeTrue(Shell.WINDOWS);
      assertEquals(8191, Shell.WINDOWS_MAX_SHELL_LENGHT);
      ShellScriptBuilder builder = ShellScriptBuilder.create();
      // Comfortably under the limit.
      builder.env("somekey", org.apache.commons.lang.StringUtils.repeat("A", 1024));
      // Exactly at the limit.
      builder.env("somekey", org.apache.commons.lang.StringUtils.repeat(
          "A", Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length()));
      try {
        // One character over must fail (the "+ 1" appends a char to the value).
        builder.env("somekey", org.apache.commons.lang.StringUtils.repeat(
            "A", Shell.WINDOWS_MAX_SHELL_LENGHT - ("@set somekey=").length()) + 1);
        fail("long env was expected to throw");
      } catch (IOException e) {
        assertThat(e.getMessage(), containsString(expectedMessage));
      }
    }

    APIUtilityVerifier BranchVerifier EqualityVerifier 
    /**
     * expandEnvironment() must render cross-platform variables, classpath
     * separators, and the log-dir expansion var in platform-specific form.
     */
    @Test(timeout = 10000)
    public void testEnvExpansion() throws IOException {
      Path logPath = new Path("/nm/container/logs");
      String input = Apps.crossPlatformify("HADOOP_HOME")
          + "/share/hadoop/common/*"
          + ApplicationConstants.CLASS_PATH_SEPARATOR
          + Apps.crossPlatformify("HADOOP_HOME")
          + "/share/hadoop/common/lib/*"
          + ApplicationConstants.CLASS_PATH_SEPARATOR
          + Apps.crossPlatformify("HADOOP_LOG_HOME")
          + ApplicationConstants.LOG_DIR_EXPANSION_VAR;
      String res = ContainerLaunch.expandEnvironment(input, logPath);
      if (Shell.WINDOWS) {
        Assert.assertEquals("%HADOOP_HOME%/share/hadoop/common/*;"
            + "%HADOOP_HOME%/share/hadoop/common/lib/*;"
            + "%HADOOP_LOG_HOME%/nm/container/logs", res);
      } else {
        Assert.assertEquals("$HADOOP_HOME/share/hadoop/common/*:"
            + "$HADOOP_HOME/share/hadoop/common/lib/*:"
            + "$HADOOP_LOG_HOME/nm/container/logs", res);
      }
      System.out.println(res);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): this method was collapsed in extraction — statements are
    // split mid-line and many generic type parameters appear stripped (raw
    // Map/List/HashMap); confirm against the original source before editing.
    // Outline: start the container manager; launch a container whose script
    // echoes CONTAINER_ID/NM_HOST/NM_PORT/NM_HTTP_PORT/LOCAL_DIRS/USER/
    // LOGNAME/PWD/HOME plus aux-service data and its PID into a file; then
    // assert each line of that file and the sanitized launch-context env
    // match the NM-provided values (user-set values must be overridden);
    // finally stop the container and assert KILLED_BY_APPMASTER exit status
    // and that the process is gone.
    /** * See if environment variable is forwarded using sanitizeEnv. * @throws Exception */ @Test(timeout=60000) public void testContainerEnvVariables() throws Exception { containerManager.start(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); ApplicationId appId=ApplicationId.newInstance(0,0); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId cId=ContainerId.newInstance(appAttemptId,0); Map userSetEnv=new HashMap(); userSetEnv.put(Environment.CONTAINER_ID.name(),"user_set_container_id"); userSetEnv.put(Environment.NM_HOST.name(),"user_set_NM_HOST"); userSetEnv.put(Environment.NM_PORT.name(),"user_set_NM_PORT"); userSetEnv.put(Environment.NM_HTTP_PORT.name(),"user_set_NM_HTTP_PORT"); userSetEnv.put(Environment.LOCAL_DIRS.name(),"user_set_LOCAL_DIR"); userSetEnv.put(Environment.USER.key(),"user_set_" + Environment.USER.key()); userSetEnv.put(Environment.LOGNAME.name(),"user_set_LOGNAME"); userSetEnv.put(Environment.PWD.name(),"user_set_PWD"); userSetEnv.put(Environment.HOME.name(),"user_set_HOME"); containerLaunchContext.setEnvironment(userSetEnv); File scriptFile=Shell.appendScriptExtension(tmpDir,"scriptFile"); PrintWriter fileWriter=new PrintWriter(scriptFile); File processStartFile=new File(tmpDir,"env_vars.txt").getAbsoluteFile(); if (Shell.WINDOWS) { fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> "+ processStartFile); fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.USER.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> "+ processStartFile); fileWriter.println("@echo " + 
Environment.PWD.$() + ">> "+ processStartFile); fileWriter.println("@echo " + Environment.HOME.$() + ">> "+ processStartFile); for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) { fileWriter.println("@echo %" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ "%>> "+ processStartFile); } fileWriter.println("@echo " + cId + ">> "+ processStartFile); fileWriter.println("@ping -n 100 127.0.0.1 >nul"); } else { fileWriter.write("\numask 0"); fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > "+ processStartFile); fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.USER.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.PWD.name() + " >> "+ processStartFile); fileWriter.write("\necho $" + Environment.HOME.name() + " >> "+ processStartFile); for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) { fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + serviceName+ " >> "+ processStartFile); } fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); } fileWriter.close(); URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String 
destinationFile="dest_file"; Map localResources=new HashMap(); localResources.put(destinationFile,rsrc_alpha); containerLaunchContext.setLocalResources(localResources); List commands=Arrays.asList(Shell.getRunScriptCommand(scriptFile)); containerLaunchContext.setCommands(commands); StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,createContainerToken(cId,Priority.newInstance(0),0)); List list=new ArrayList(); list.add(scRequest); StartContainersRequest allRequests=StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs=0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists()); List localDirs=dirsHandler.getLocalDirs(); List logDirs=dirsHandler.getLogDirs(); List appDirs=new ArrayList(localDirs.size()); for ( String localDir : localDirs) { Path usersdir=new Path(localDir,ContainerLocalizer.USERCACHE); Path userdir=new Path(usersdir,user); Path appsdir=new Path(userdir,ContainerLocalizer.APPCACHE); appDirs.add(new Path(appsdir,appId.toString())); } List containerLogDirs=new ArrayList(); String relativeContainerLogDir=ContainerLaunch.getRelativeContainerLogDir(appId.toString(),cId.toString()); for ( String logDir : logDirs) { containerLogDirs.add(logDir + Path.SEPARATOR + relativeContainerLogDir); } BufferedReader reader=new BufferedReader(new FileReader(processStartFile)); Assert.assertEquals(cId.toString(),reader.readLine()); Assert.assertEquals(context.getNodeId().getHost(),reader.readLine()); Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),reader.readLine()); Assert.assertEquals(String.valueOf(HTTP_PORT),reader.readLine()); Assert.assertEquals(StringUtils.join(",",appDirs),reader.readLine()); Assert.assertEquals(user,reader.readLine()); Assert.assertEquals(user,reader.readLine()); String 
obtainedPWD=reader.readLine(); boolean found=false; for ( Path localDir : appDirs) { if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) { found=true; break; } } Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found); Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),reader.readLine()); for ( String serviceName : containerManager.getAuxServiceMetaData().keySet()) { Assert.assertEquals(containerManager.getAuxServiceMetaData().get(serviceName),ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes()))); } Assert.assertEquals(cId.toString(),containerLaunchContext.getEnvironment().get(Environment.CONTAINER_ID.name())); Assert.assertEquals(context.getNodeId().getHost(),containerLaunchContext.getEnvironment().get(Environment.NM_HOST.name())); Assert.assertEquals(String.valueOf(context.getNodeId().getPort()),containerLaunchContext.getEnvironment().get(Environment.NM_PORT.name())); Assert.assertEquals(String.valueOf(HTTP_PORT),containerLaunchContext.getEnvironment().get(Environment.NM_HTTP_PORT.name())); Assert.assertEquals(StringUtils.join(",",appDirs),containerLaunchContext.getEnvironment().get(Environment.LOCAL_DIRS.name())); Assert.assertEquals(StringUtils.join(",",containerLogDirs),containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name())); Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.USER.name())); Assert.assertEquals(user,containerLaunchContext.getEnvironment().get(Environment.LOGNAME.name())); found=false; obtainedPWD=containerLaunchContext.getEnvironment().get(Environment.PWD.name()); for ( Path localDir : appDirs) { if (new Path(localDir,cId.toString()).toString().equals(obtainedPWD)) { found=true; break; } } Assert.assertTrue("Wrong local-dir found : " + obtainedPWD,found); 
Assert.assertEquals(conf.get(YarnConfiguration.NM_USER_HOME_DIR,YarnConfiguration.DEFAULT_NM_USER_HOME_DIR),containerLaunchContext.getEnvironment().get(Environment.HOME.name())); String pid=reader.readLine().trim(); Assert.assertEquals(null,reader.readLine()); Assert.assertTrue("Process is not alive!",DefaultContainerExecutor.containerIsAlive(pid)); Assert.assertTrue("Process is not alive!",DefaultContainerExecutor.containerIsAlive(pid)); List containerIds=new ArrayList(); containerIds.add(cId); StopContainersRequest stopRequest=StopContainersRequest.newInstance(containerIds); containerManager.stopContainers(stopRequest); BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE); GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); int expectedExitCode=ContainerExitStatus.KILLED_BY_APPMASTER; Assert.assertEquals(expectedExitCode,containerStatus.getExitStatus()); Assert.assertFalse("Process is still alive!",DefaultContainerExecutor.containerIsAlive(pid)); }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestContainerLocalizer

    EqualityVerifier 
    /**
     * End-to-end run of {@code ContainerLocalizer.runLocalization} against a
     * mocked NM proxy. Six scripted heartbeat responses hand out four resource
     * specs (rsrcA/B/D PRIVATE, rsrcC APPLICATION), then an empty LIVE
     * response, then DIE; downloads are stubbed with {@code FakeDownload}.
     * Verifies: run exits 0, the private and app filecache dirs are created
     * under every local dir with CACHE_DIR_PERM, the token file is opened,
     * each resource is reported back in a heartbeat, and no heartbeat ever
     * carries a localizer id other than this container's.
     */
    @Test public void testContainerLocalizerMain() throws Exception { FileContext fs=FileContext.getLocalFSFileContext(); spylfs=spy(fs.getDefaultFileSystem()); ContainerLocalizer localizer=setupContainerLocalizerForTest(); List privCacheList=new ArrayList(); List appCacheList=new ArrayList(); for ( Path p : localDirs) { Path base=new Path(new Path(p,ContainerLocalizer.USERCACHE),appUser); Path privcache=new Path(base,ContainerLocalizer.FILECACHE); privCacheList.add(privcache); Path appDir=new Path(base,new Path(ContainerLocalizer.APPCACHE,appId)); Path appcache=new Path(appDir,ContainerLocalizer.FILECACHE); appCacheList.add(appcache); } ResourceLocalizationSpec rsrcA=getMockRsrc(random,LocalResourceVisibility.PRIVATE,privCacheList.get(0)); ResourceLocalizationSpec rsrcB=getMockRsrc(random,LocalResourceVisibility.PRIVATE,privCacheList.get(0)); ResourceLocalizationSpec rsrcC=getMockRsrc(random,LocalResourceVisibility.APPLICATION,appCacheList.get(0)); ResourceLocalizationSpec rsrcD=getMockRsrc(random,LocalResourceVisibility.PRIVATE,privCacheList.get(0)); when(nmProxy.heartbeat(isA(LocalizerStatus.class))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,Collections.singletonList(rsrcA))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,Collections.singletonList(rsrcB))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,Collections.singletonList(rsrcC))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,Collections.singletonList(rsrcD))).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,Collections.emptyList())).thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE,null)); LocalResource tRsrcA=rsrcA.getResource(); LocalResource tRsrcB=rsrcB.getResource(); LocalResource tRsrcC=rsrcC.getResource(); LocalResource tRsrcD=rsrcD.getResource(); doReturn(new 
FakeDownload(rsrcA.getResource().getResource().getFile(),true)).when(localizer).download(isA(Path.class),eq(tRsrcA),isA(UserGroupInformation.class)); doReturn(new FakeDownload(rsrcB.getResource().getResource().getFile(),true)).when(localizer).download(isA(Path.class),eq(tRsrcB),isA(UserGroupInformation.class)); doReturn(new FakeDownload(rsrcC.getResource().getResource().getFile(),true)).when(localizer).download(isA(Path.class),eq(tRsrcC),isA(UserGroupInformation.class)); doReturn(new FakeDownload(rsrcD.getResource().getResource().getFile(),true)).when(localizer).download(isA(Path.class),eq(tRsrcD),isA(UserGroupInformation.class)); assertEquals(0,localizer.runLocalization(nmAddr)); for ( Path p : localDirs) { Path base=new Path(new Path(p,ContainerLocalizer.USERCACHE),appUser); Path privcache=new Path(base,ContainerLocalizer.FILECACHE); verify(spylfs).mkdir(eq(privcache),eq(CACHE_DIR_PERM),eq(false)); Path appDir=new Path(base,new Path(ContainerLocalizer.APPCACHE,appId)); Path appcache=new Path(appDir,ContainerLocalizer.FILECACHE); verify(spylfs).mkdir(eq(appcache),eq(CACHE_DIR_PERM),eq(false)); } verify(spylfs).open(tokenPath); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcA.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcB.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcC.getResource()))); verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcD.getResource()))); verify(nmProxy,never()).heartbeat(argThat(new ArgumentMatcher(){ @Override public boolean matches( Object o){ LocalizerStatus status=(LocalizerStatus)o; return !containerId.equals(status.getLocalizerId()); } } )); }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestLocalCacheDirectoryManager

    IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that a cache directory which has reached its per-directory file
     * limit becomes assignable again after decrements: with the limit set to
     * "40", the root ("") is handed out while capacity remains, two decrements
     * reopen exactly two root slots, and only then does localization fall
     * through to the first sub-directory ("0").
     */
    @Test(timeout = 1000)
    public void testDirectoryStateChangeFromFullToNonFull() {
      YarnConfiguration config = new YarnConfiguration();
      config.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "40");
      LocalCacheDirectoryManager manager = new LocalCacheDirectoryManager(config);
      final String root = "";
      final String firstChild = "0";
      // Consume the root directory's capacity: each call hands out the root.
      int remaining = 4;
      while (remaining-- > 0) {
        Assert.assertEquals(root, manager.getRelativePathForLocalization());
      }
      // Free two slots in the now-full root directory.
      manager.decrementFileCountForPath(root);
      manager.decrementFileCountForPath(root);
      // Both freed slots are reused before spilling into a sub-directory.
      Assert.assertEquals(root, manager.getRelativePathForLocalization());
      Assert.assertEquals(root, manager.getRelativePathForLocalization());
      Assert.assertEquals(firstChild, manager.getRelativePathForLocalization());
    }

    APIUtilityVerifier IterativeVerifier EqualityVerifier 
    /**
     * Round-trips the first 10000 directory numbers through
     * {@code Directory.getRelativePath} and {@code Directory.getDirectoryNumber},
     * checking that the two conversions are exact inverses of each other.
     */
    @Test
    public void testDirectoryConversion() {
      for (int dirNum = 0; dirNum < 10000; dirNum++) {
        String relativePath = Directory.getRelativePath(dirNum);
        int roundTripped = Directory.getDirectoryNumber(relativePath);
        Assert.assertEquals("Incorrect conversion for " + dirNum, dirNum, roundTripped);
      }
    }

    NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Configuring fewer than 37 files per cache directory must be rejected:
     * {@code ResourceLocalizationService.init} is expected to fail with a
     * {@link YarnRuntimeException} whose message names the offending
     * configuration key.
     */
    @Test(timeout = 10000)
    public void testMinimumPerDirectoryFileLimit() {
      YarnConfiguration conf = new YarnConfiguration();
      conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "1");
      Exception e = null;
      ResourceLocalizationService service =
          new ResourceLocalizationService(null, null, null, null, null);
      try {
        service.init(conf);
      } catch (Exception e1) {
        e = e1;
      }
      Assert.assertNotNull("init should reject a per-directory limit below 37", e);
      Assert.assertEquals(YarnRuntimeException.class, e.getClass());
      // Fixed argument order: JUnit's assertEquals takes (expected, actual);
      // the original passed them reversed, mislabeling any failure report.
      Assert.assertEquals(
          YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY
              + " parameter is configured with a value less than 37.",
          e.getMessage());
    }

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises hierarchical sub-directory creation in
     * {@link LocalCacheDirectoryManager} at the minimum allowed limit (37).
     * The first localization goes to the root (empty relative path); each of
     * the next {@code 37 * 36 * 36} requests must match the base-36-derived
     * relative path computed in the loop. Finally, decrementing the file
     * counts of two previously handed-out sub-directories ("4" and "2") makes
     * them the next paths assigned, in decrement order.
     */
    @Test(timeout = 10000)
    public void testHierarchicalSubDirectoryCreation() {
      YarnConfiguration conf = new YarnConfiguration();
      conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, "37");
      LocalCacheDirectoryManager hDir = new LocalCacheDirectoryManager(conf);
      // First request localizes into the root directory (empty relative path).
      Assert.assertTrue(hDir.getRelativePathForLocalization().isEmpty());
      for (int i = 1; i <= 37 * 36 * 36; i++) {
        // StringBuilder instead of StringBuffer: the buffer is method-local
        // and single-threaded, so the synchronized variant is pure overhead.
        StringBuilder sb = new StringBuilder();
        String num = Integer.toString(i - 1, 36);
        if (num.length() == 1) {
          sb.append(num.charAt(0));
        } else {
          // The leading base-36 digit is shifted down by one at the top level.
          sb.append(Integer.toString(
              Integer.parseInt(num.substring(0, 1), 36) - 1, 36));
        }
        for (int j = 1; j < num.length(); j++) {
          sb.append(Path.SEPARATOR).append(num.charAt(j));
        }
        Assert.assertEquals(sb.toString(), hDir.getRelativePathForLocalization());
      }
      String testPath1 = "4";
      String testPath2 = "2";
      // Freeing a slot in a full directory makes it assignable again.
      hDir.decrementFileCountForPath(testPath1);
      hDir.decrementFileCountForPath(testPath2);
      Assert.assertEquals(testPath1, hDir.getRelativePathForLocalization());
      Assert.assertEquals(testPath2, hDir.getRelativePathForLocalization());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises {@code incrementFileCountForPath} against both the root
     * directory and a deep relative path ("d/e/e/p/..."): incrementing the
     * root's count consumes a slot (so the root fills after one more
     * localization), decrements reopen slots, and increments against
     * not-yet-assigned deep paths advance the directory state so subsequent
     * localizations are handed the expected sibling paths (deepDir1 then
     * deepDir3). The limit is DIRECTORIES_PER_LEVEL + 2, i.e. two usable
     * slots per directory.
     */
    @Test public void testIncrementFileCountForPath(){ YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2); LocalCacheDirectoryManager mgr=new LocalCacheDirectoryManager(conf); final String rootPath=""; mgr.incrementFileCountForPath(rootPath); Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization()); Assert.assertFalse("root dir should be full",rootPath.equals(mgr.getRelativePathForLocalization())); mgr.getRelativePathForLocalization(); mgr.decrementFileCountForPath(rootPath); mgr.decrementFileCountForPath(rootPath); Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization()); Assert.assertEquals(rootPath,mgr.getRelativePathForLocalization()); String otherDir=mgr.getRelativePathForLocalization(); Assert.assertFalse("root dir should be full",otherDir.equals(rootPath)); final String deepDir0="d/e/e/p/0"; final String deepDir1="d/e/e/p/1"; final String deepDir2="d/e/e/p/2"; final String deepDir3="d/e/e/p/3"; mgr.incrementFileCountForPath(deepDir0); Assert.assertEquals(otherDir,mgr.getRelativePathForLocalization()); Assert.assertEquals(deepDir0,mgr.getRelativePathForLocalization()); Assert.assertEquals("total dir count incorrect after increment",deepDir1,mgr.getRelativePathForLocalization()); mgr.incrementFileCountForPath(deepDir2); mgr.incrementFileCountForPath(deepDir1); mgr.incrementFileCountForPath(deepDir2); Assert.assertEquals(deepDir3,mgr.getRelativePathForLocalization()); }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestLocalResourcesTrackerImpl

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies NM state-store bookkeeping for a successful APPLICATION-scoped
     * localization: {@code startResourceLocalization} is recorded with the
     * resource proto and assigned path when the path is handed out,
     * {@code finishResourceLocalization} with the completed proto (matching
     * resource and local path) after the ResourceLocalizedEvent, and
     * {@code removeLocalizedResource} once the resource is released and
     * removed from the tracker.
     */
    @Test @SuppressWarnings("unchecked") public void testStateStoreSuccessfulLocalization() throws Exception { final String user="someuser"; final ApplicationId appId=ApplicationId.newInstance(1,1); final Path localDir=new Path("/tmp"); Configuration conf=new YarnConfiguration(); DrainDispatcher dispatcher=null; dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); DeletionService mockDelService=mock(DeletionService.class); NMStateStoreService stateStore=mock(NMStateStoreService.class); try { LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION); LocalizerContext lc1=new LocalizerContext(user,cId1,null); ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.APPLICATION,lc1); tracker.handle(reqEvent1); dispatcher.await(); Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir); ArgumentCaptor localResourceCaptor=ArgumentCaptor.forClass(LocalResourceProto.class); ArgumentCaptor pathCaptor=ArgumentCaptor.forClass(Path.class); verify(stateStore).startResourceLocalization(eq(user),eq(appId),localResourceCaptor.capture(),pathCaptor.capture()); LocalResourceProto lrProto=localResourceCaptor.getValue(); Path localizedPath1=pathCaptor.getValue(); Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(lrProto))); Assert.assertEquals(hierarchicalPath1,localizedPath1.getParent()); ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,pathCaptor.getValue(),120); tracker.handle(rle1); dispatcher.await(); ArgumentCaptor 
localizedProtoCaptor=ArgumentCaptor.forClass(LocalizedResourceProto.class); verify(stateStore).finishResourceLocalization(eq(user),eq(appId),localizedProtoCaptor.capture()); LocalizedResourceProto localizedProto=localizedProtoCaptor.getValue(); Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(localizedProto.getResource()))); Assert.assertEquals(localizedPath1.toString(),localizedProto.getLocalPath()); LocalizedResource localizedRsrc1=tracker.getLocalizedResource(lr1); Assert.assertNotNull(localizedRsrc1); tracker.handle(new ResourceReleaseEvent(lr1,cId1)); dispatcher.await(); boolean removeResult=tracker.remove(localizedRsrc1,mockDelService); Assert.assertTrue(removeResult); verify(stateStore).removeLocalizedResource(eq(user),eq(appId),eq(localizedPath1)); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Basic reference counting in LocalResourcesTrackerImpl with two PUBLIC
     * resources shared across two containers: each request raises the ref
     * count and fires a localizer request event; a resource with outstanding
     * refs cannot be removed; once a LOCALIZED resource's refs drop to zero
     * it can be removed, shrinking the tracked-resource count.
     */
    @Test(timeout=10000) @SuppressWarnings("unchecked") public void test(){ String user="testuser"; DrainDispatcher dispatcher=null; try { Configuration conf=new Configuration(); dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); DeletionService mockDelService=mock(DeletionService.class); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalizerContext lc1=new LocalizerContext(user,cId1,null); ContainerId cId2=BuilderUtils.newContainerId(1,1,1,2); LocalizerContext lc2=new LocalizerContext(user,cId2,null); LocalResourceRequest req1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC); LocalResourceRequest req2=createLocalResourceRequest(user,2,1,LocalResourceVisibility.PUBLIC); LocalizedResource lr1=createLocalizedResource(req1,dispatcher); LocalizedResource lr2=createLocalizedResource(req2,dispatcher); ConcurrentMap localrsrc=new ConcurrentHashMap(); localrsrc.put(req1,lr1); localrsrc.put(req2,lr2); LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,false,conf,new NMNullStateStoreService()); ResourceEvent req11Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc1); ResourceEvent req12Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc2); ResourceEvent req21Event=new ResourceRequestEvent(req2,LocalResourceVisibility.PUBLIC,lc1); ResourceEvent rel11Event=new ResourceReleaseEvent(req1,cId1); ResourceEvent rel12Event=new ResourceReleaseEvent(req1,cId2); ResourceEvent rel21Event=new ResourceReleaseEvent(req2,cId1); tracker.handle(req11Event); tracker.handle(req12Event); tracker.handle(req21Event); dispatcher.await(); verify(localizerEventHandler,times(3)).handle(any(LocalizerResourceRequestEvent.class)); 
Assert.assertEquals(2,lr1.getRefCount()); Assert.assertEquals(1,lr2.getRefCount()); tracker.handle(rel21Event); dispatcher.await(); verifyTrackedResourceCount(tracker,2); Assert.assertEquals(2,lr1.getRefCount()); Assert.assertFalse(tracker.remove(lr1,mockDelService)); verifyTrackedResourceCount(tracker,2); ResourceLocalizedEvent rle=new ResourceLocalizedEvent(req1,new Path("file:///tmp/r1"),1); lr1.handle(rle); Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED)); tracker.handle(rel11Event); tracker.handle(rel12Event); Assert.assertEquals(0,lr1.getRefCount()); Assert.assertTrue(tracker.remove(lr1,mockDelService)); verifyTrackedResourceCount(tracker,1); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Recovers PUBLIC resources at various hierarchical cache locations and
     * checks the tracker's per-root LocalCacheDirectoryManager reflects them:
     * after each ResourceRecoveredEvent the resource is known to the tracker,
     * and the directory counts for "", "4", "4/2" and "4/3" track how many
     * recovered resources landed in each relative directory.
     */
    @Test @SuppressWarnings("unchecked") public void testRecoveredResourceWithDirCacheMgr() throws Exception { final String user="someuser"; final ApplicationId appId=ApplicationId.newInstance(1,1); final Path localDirRoot=new Path("/tmp/localdir"); Configuration conf=new YarnConfiguration(); DrainDispatcher dispatcher=null; dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); NMStateStoreService stateStore=mock(NMStateStoreService.class); try { LocalResourcesTrackerImpl tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,true,conf,stateStore); LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr1)); final long localizedId1=52; Path hierarchicalPath1=new Path(localDirRoot + "/4/2",Long.toString(localizedId1)); Path localizedPath1=new Path(hierarchicalPath1,"resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr1,localizedPath1,120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr1)); LocalCacheDirectoryManager dirMgrRoot=tracker.getDirectoryManager(localDirRoot); Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(1,dirMgrRoot.getDirectory("4/2").getCount()); LocalResourceRequest lr2=createLocalResourceRequest(user,2,2,LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr2)); final long localizedId2=localizedId1 + 1; Path hierarchicalPath2=new Path(localDirRoot + "/4/2",Long.toString(localizedId2)); Path localizedPath2=new Path(hierarchicalPath2,"resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr2,localizedPath2,120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr2)); 
Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(2,dirMgrRoot.getDirectory("4/2").getCount()); LocalResourceRequest lr3=createLocalResourceRequest(user,3,3,LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr3)); final long localizedId3=128; Path hierarchicalPath3=new Path(localDirRoot + "/4/3",Long.toString(localizedId3)); Path localizedPath3=new Path(hierarchicalPath3,"resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr3,localizedPath3,120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr3)); Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(2,dirMgrRoot.getDirectory("4/2").getCount()); Assert.assertEquals(1,dirMgrRoot.getDirectory("4/3").getCount()); LocalResourceRequest lr4=createLocalResourceRequest(user,4,4,LocalResourceVisibility.PUBLIC); Assert.assertNull(tracker.getLocalizedResource(lr4)); final long localizedId4=256; Path hierarchicalPath4=new Path(localDirRoot + "/4",Long.toString(localizedId4)); Path localizedPath4=new Path(hierarchicalPath4,"resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr4,localizedPath4,120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr4)); Assert.assertEquals(0,dirMgrRoot.getDirectory("").getCount()); Assert.assertEquals(1,dirMgrRoot.getDirectory("4").getCount()); Assert.assertEquals(2,dirMgrRoot.getDirectory("4/2").getCount()); Assert.assertEquals(1,dirMgrRoot.getDirectory("4/3").getCount()); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Recovers an APPLICATION-scoped resource with local id 52 via a
     * ResourceRecoveredEvent and verifies that (a) the tracker reports it as
     * localized, and (b) the tracker resumes assigning unique local ids after
     * the recovered one: the next localization path ends in id 53.
     */
    @Test @SuppressWarnings("unchecked") public void testRecoveredResource() throws Exception { final String user="someuser"; final ApplicationId appId=ApplicationId.newInstance(1,1); final Path localDir=new Path("/tmp/localdir"); Configuration conf=new YarnConfiguration(); DrainDispatcher dispatcher=null; dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); NMStateStoreService stateStore=mock(NMStateStoreService.class); try { LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION); Assert.assertNull(tracker.getLocalizedResource(lr1)); final long localizedId1=52; Path hierarchicalPath1=new Path(localDir,Long.toString(localizedId1)); Path localizedPath1=new Path(hierarchicalPath1,"resource.jar"); tracker.handle(new ResourceRecoveredEvent(lr1,localizedPath1,120)); dispatcher.await(); Assert.assertNotNull(tracker.getLocalizedResource(lr1)); LocalResourceRequest lr2=createLocalResourceRequest(user,2,2,LocalResourceVisibility.APPLICATION); LocalizerContext lc2=new LocalizerContext(user,cId1,null); ResourceEvent reqEvent2=new ResourceRequestEvent(lr2,LocalResourceVisibility.APPLICATION,lc2); tracker.handle(reqEvent2); dispatcher.await(); Path hierarchicalPath2=tracker.getPathForLocalization(lr2,localDir); long localizedId2=Long.parseLong(hierarchicalPath2.getName()); Assert.assertEquals(localizedId1 + 1,localizedId2); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Lifecycle of a cached resource across failure and retry: two containers
     * request the same resource (ref count climbs to 2), a localization
     * failure notifies both containers and evicts the cache entry, a later
     * request from a third container re-creates the entry, a stale release
     * from the failed generation leaves the new entry untouched, and a
     * successful localization then notifies only the live requester, after
     * which its release drops the ref count back to zero.
     */
    @Test(timeout=1000) @SuppressWarnings("unchecked") public void testLocalResourceCache(){ String user="testuser"; DrainDispatcher dispatcher=null; try { Configuration conf=new Configuration(); dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); ConcurrentMap localrsrc=new ConcurrentHashMap(); LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,true,conf,new NMNullStateStoreService()); LocalResourceRequest lr=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalizerContext lc1=new LocalizerContext(user,cId1,null); ResourceEvent reqEvent1=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc1); Assert.assertEquals(0,localrsrc.size()); tracker.handle(reqEvent1); dispatcher.await(); Assert.assertEquals(1,localrsrc.size()); Assert.assertTrue(localrsrc.containsKey(lr)); Assert.assertEquals(1,localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1)); Assert.assertEquals(ResourceState.DOWNLOADING,localrsrc.get(lr).getState()); ContainerId cId2=BuilderUtils.newContainerId(1,1,1,2); LocalizerContext lc2=new LocalizerContext(user,cId2,null); ResourceEvent reqEvent2=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc2); tracker.handle(reqEvent2); dispatcher.await(); Assert.assertEquals(2,localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2)); ResourceEvent resourceFailedEvent=new ResourceFailedLocalizationEvent(lr,(new Exception("test").getMessage())); LocalizedResource localizedResource=localrsrc.get(lr); tracker.handle(resourceFailedEvent); dispatcher.await(); Assert.assertEquals(0,localrsrc.size()); 
verify(containerEventHandler,times(2)).handle(isA(ContainerResourceFailedEvent.class)); Assert.assertEquals(ResourceState.FAILED,localizedResource.getState()); ResourceReleaseEvent relEvent1=new ResourceReleaseEvent(lr,cId1); tracker.handle(relEvent1); dispatcher.await(); ContainerId cId3=BuilderUtils.newContainerId(1,1,1,3); LocalizerContext lc3=new LocalizerContext(user,cId3,null); ResourceEvent reqEvent3=new ResourceRequestEvent(lr,LocalResourceVisibility.PRIVATE,lc3); tracker.handle(reqEvent3); dispatcher.await(); Assert.assertEquals(1,localrsrc.size()); Assert.assertTrue(localrsrc.containsKey(lr)); Assert.assertEquals(1,localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3)); ResourceReleaseEvent relEvent2=new ResourceReleaseEvent(lr,cId2); tracker.handle(relEvent2); dispatcher.await(); Assert.assertEquals(1,localrsrc.size()); Assert.assertTrue(localrsrc.containsKey(lr)); Assert.assertEquals(1,localrsrc.get(lr).getRefCount()); Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3)); Path localizedPath=new Path("/tmp/file1"); ResourceLocalizedEvent localizedEvent=new ResourceLocalizedEvent(lr,localizedPath,123L); tracker.handle(localizedEvent); dispatcher.await(); verify(containerEventHandler,times(1)).handle(isA(ContainerResourceLocalizedEvent.class)); Assert.assertEquals(ResourceState.LOCALIZED,localrsrc.get(lr).getState()); Assert.assertEquals(1,localrsrc.get(lr).getRefCount()); ResourceReleaseEvent relEvent3=new ResourceReleaseEvent(lr,cId3); tracker.handle(relEvent3); dispatcher.await(); Assert.assertEquals(0,localrsrc.get(lr).getRefCount()); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies state-store bookkeeping for a failed localization:
     * {@code startResourceLocalization} is recorded when the localization
     * path is assigned (captured proto/path match the request), and
     * {@code removeLocalizedResource} is invoked for that same path once the
     * ResourceFailedLocalizationEvent is handled.
     */
    @Test @SuppressWarnings("unchecked") public void testStateStoreFailedLocalization() throws Exception { final String user="someuser"; final ApplicationId appId=ApplicationId.newInstance(1,1); final Path localDir=new Path("/tmp"); Configuration conf=new YarnConfiguration(); DrainDispatcher dispatcher=null; dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); NMStateStoreService stateStore=mock(NMStateStoreService.class); try { LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,appId,dispatcher,false,conf,stateStore); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.APPLICATION); LocalizerContext lc1=new LocalizerContext(user,cId1,null); ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.APPLICATION,lc1); tracker.handle(reqEvent1); dispatcher.await(); Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir); ArgumentCaptor localResourceCaptor=ArgumentCaptor.forClass(LocalResourceProto.class); ArgumentCaptor pathCaptor=ArgumentCaptor.forClass(Path.class); verify(stateStore).startResourceLocalization(eq(user),eq(appId),localResourceCaptor.capture(),pathCaptor.capture()); LocalResourceProto lrProto=localResourceCaptor.getValue(); Path localizedPath1=pathCaptor.getValue(); Assert.assertEquals(lr1,new LocalResourceRequest(new LocalResourcePBImpl(lrProto))); Assert.assertEquals(hierarchicalPath1,localizedPath1.getParent()); ResourceFailedLocalizationEvent rfe1=new ResourceFailedLocalizationEvent(lr1,new Exception("Test").toString()); tracker.handle(rfe1); dispatcher.await(); verify(stateStore).removeLocalizedResource(eq(user),eq(appId),eq(localizedPath1)); } finally { if (dispatcher != null) { 
dispatcher.stop(); } } }

    APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * With a 37-file per-directory limit, three PUBLIC localization requests
     * are placed in hierarchical cache directories. A failed localization
     * (lr2) releases its slot, so the third request (lr3) lands in the first
     * sub-directory under lr1's parent ("…/0"). Finally iterates the tracker:
     * two resources are tracked, and only the one with zero refs (lr1 was
     * released) can be removed, leaving one.
     */
    @Test(timeout=100000) @SuppressWarnings("unchecked") public void testHierarchicalLocalCacheDirectories(){ String user="testuser"; DrainDispatcher dispatcher=null; try { Configuration conf=new Configuration(); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37"); dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); DeletionService mockDelService=mock(DeletionService.class); ConcurrentMap localrsrc=new ConcurrentHashMap(); LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,true,conf,new NMNullStateStoreService()); Path localDir=new Path("/tmp"); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalResourceRequest lr1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC); LocalizerContext lc1=new LocalizerContext(user,cId1,null); ResourceEvent reqEvent1=new ResourceRequestEvent(lr1,LocalResourceVisibility.PUBLIC,lc1); tracker.handle(reqEvent1); Path hierarchicalPath1=tracker.getPathForLocalization(lr1,localDir).getParent(); ResourceLocalizedEvent rle1=new ResourceLocalizedEvent(lr1,new Path(hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "file1"),120); tracker.handle(rle1); LocalResourceRequest lr2=createLocalResourceRequest(user,3,3,LocalResourceVisibility.PUBLIC); ResourceEvent reqEvent2=new ResourceRequestEvent(lr2,LocalResourceVisibility.PUBLIC,lc1); tracker.handle(reqEvent2); Path hierarchicalPath2=tracker.getPathForLocalization(lr2,localDir).getParent(); ResourceFailedLocalizationEvent rfe2=new ResourceFailedLocalizationEvent(lr2,new Exception("Test").toString()); tracker.handle(rfe2); Assert.assertNotSame(hierarchicalPath1,hierarchicalPath2); LocalResourceRequest 
lr3=createLocalResourceRequest(user,2,2,LocalResourceVisibility.PUBLIC); ResourceEvent reqEvent3=new ResourceRequestEvent(lr3,LocalResourceVisibility.PUBLIC,lc1); tracker.handle(reqEvent3); Path hierarchicalPath3=tracker.getPathForLocalization(lr3,localDir).getParent(); ResourceLocalizedEvent rle3=new ResourceLocalizedEvent(lr3,new Path(hierarchicalPath3.toUri().toString() + Path.SEPARATOR + "file3"),120); tracker.handle(rle3); Assert.assertEquals(hierarchicalPath3.toUri().toString(),hierarchicalPath1.toUri().toString() + Path.SEPARATOR + "0"); ResourceEvent relEvent1=new ResourceReleaseEvent(lr1,cId1); tracker.handle(relEvent1); int resources=0; Iterator iter=tracker.iterator(); while (iter.hasNext()) { iter.next(); resources++; } Assert.assertEquals(2,resources); iter=tracker.iterator(); while (iter.hasNext()) { LocalizedResource rsrc=iter.next(); if (rsrc.getRefCount() == 0) { Assert.assertTrue(tracker.remove(rsrc,mockDelService)); resources--; } } Assert.assertEquals(1,resources); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * If a LOCALIZED resource's backing file disappears from disk, a later
     * request for the same resource must not be served from the stale entry:
     * after localizing to file:///tmp/r1, deleting the file, and re-requesting,
     * the tracker's iterator is expected to yield a different
     * LocalizedResource instance (identity compared) than before the delete.
     */
    @Test(timeout=10000) @SuppressWarnings("unchecked") public void testConsistency(){ String user="testuser"; DrainDispatcher dispatcher=null; try { Configuration conf=new Configuration(); dispatcher=createDispatcher(conf); EventHandler localizerEventHandler=mock(EventHandler.class); EventHandler containerEventHandler=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerEventHandler); dispatcher.register(ContainerEventType.class,containerEventHandler); ContainerId cId1=BuilderUtils.newContainerId(1,1,1,1); LocalizerContext lc1=new LocalizerContext(user,cId1,null); LocalResourceRequest req1=createLocalResourceRequest(user,1,1,LocalResourceVisibility.PUBLIC); LocalizedResource lr1=createLocalizedResource(req1,dispatcher); ConcurrentMap localrsrc=new ConcurrentHashMap(); localrsrc.put(req1,lr1); LocalResourcesTracker tracker=new LocalResourcesTrackerImpl(user,null,dispatcher,localrsrc,false,conf,new NMNullStateStoreService()); ResourceEvent req11Event=new ResourceRequestEvent(req1,LocalResourceVisibility.PUBLIC,lc1); ResourceEvent rel11Event=new ResourceReleaseEvent(req1,cId1); tracker.handle(req11Event); dispatcher.await(); Assert.assertEquals(1,lr1.getRefCount()); dispatcher.await(); verifyTrackedResourceCount(tracker,1); ResourceLocalizedEvent rle=new ResourceLocalizedEvent(req1,new Path("file:///tmp/r1"),1); lr1.handle(rle); Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED)); Assert.assertTrue(createdummylocalizefile(new Path("file:///tmp/r1"))); LocalizedResource rsrcbefore=tracker.iterator().next(); File resFile=new File(lr1.getLocalPath().toUri().getRawPath().toString()); Assert.assertTrue(resFile.exists()); Assert.assertTrue(resFile.delete()); tracker.handle(req11Event); dispatcher.await(); lr1.handle(rle); Assert.assertTrue(lr1.getState().equals(ResourceState.LOCALIZED)); LocalizedResource rsrcafter=tracker.iterator().next(); if (rsrcbefore == rsrcafter) { Assert.fail("Localized resource should not be equal"); } 
tracker.handle(rel11Event); } finally { if (dispatcher != null) { dispatcher.stop(); } } }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestLocalizedResource

    InternalCallVerifier EqualityVerifier 
    /**
     * Drives the LocalizedResource state machine through a DrainDispatcher and
     * checks the events it emits at each step:
     * 1. First request (container0, PRIVATE) -> state DOWNLOADING and a
     *    REQUEST_RESOURCE_LOCALIZATION event on the localizer bus.
     * 2. Second request (container1, PUBLIC) for the same resource -> another
     *    localizer-bus notification.
     * 3. Releasing both requesting containers while still DOWNLOADING emits no
     *    container events and does not leave the DOWNLOADING state.
     * 4. Two more requests (container2/3) then a ResourceLocalizedEvent ->
     *    state LOCALIZED and a RESOURCE_LOCALIZED container event for each
     *    waiting container.
     * 5. A request arriving after localization (container4) is answered with
     *    RESOURCE_LOCALIZED immediately, state stays LOCALIZED.
     * NOTE(review): raw EventHandler mocks are the reason for the
     * {@code @SuppressWarnings("unchecked")}.
     */
    @Test @SuppressWarnings("unchecked") public void testNotification() throws Exception { DrainDispatcher dispatcher=new DrainDispatcher(); dispatcher.init(new Configuration()); try { dispatcher.start(); EventHandler containerBus=mock(EventHandler.class); EventHandler localizerBus=mock(EventHandler.class); dispatcher.register(ContainerEventType.class,containerBus); dispatcher.register(LocalizerEventType.class,localizerBus); LocalResource apiRsrc=createMockResource(); final ContainerId container0=getMockContainer(0); final Credentials creds0=new Credentials(); final LocalResourceVisibility vis0=LocalResourceVisibility.PRIVATE; final LocalizerContext ctxt0=new LocalizerContext("yak",container0,creds0); LocalResourceRequest rsrcA=new LocalResourceRequest(apiRsrc); LocalizedResource local=new LocalizedResource(rsrcA,dispatcher); local.handle(new ResourceRequestEvent(rsrcA,vis0,ctxt0)); dispatcher.await(); LocalizerEventMatcher matchesL0Req=new LocalizerEventMatcher(container0,creds0,vis0,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL0Req)); assertEquals(ResourceState.DOWNLOADING,local.getState()); final Credentials creds1=new Credentials(); final ContainerId container1=getMockContainer(1); final LocalizerContext ctxt1=new LocalizerContext("yak",container1,creds1); final LocalResourceVisibility vis1=LocalResourceVisibility.PUBLIC; local.handle(new ResourceRequestEvent(rsrcA,vis1,ctxt1)); dispatcher.await(); LocalizerEventMatcher matchesL1Req=new LocalizerEventMatcher(container1,creds1,vis1,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL1Req)); local.handle(new ResourceReleaseEvent(rsrcA,container0)); dispatcher.await(); verify(containerBus,never()).handle(isA(ContainerEvent.class)); assertEquals(ResourceState.DOWNLOADING,local.getState()); local.handle(new ResourceReleaseEvent(rsrcA,container1)); dispatcher.await(); 
verify(containerBus,never()).handle(isA(ContainerEvent.class)); assertEquals(ResourceState.DOWNLOADING,local.getState()); final ContainerId container2=getMockContainer(2); final LocalResourceVisibility vis2=LocalResourceVisibility.PRIVATE; final Credentials creds2=new Credentials(); final LocalizerContext ctxt2=new LocalizerContext("yak",container2,creds2); final ContainerId container3=getMockContainer(3); final LocalResourceVisibility vis3=LocalResourceVisibility.PRIVATE; final Credentials creds3=new Credentials(); final LocalizerContext ctxt3=new LocalizerContext("yak",container3,creds3); local.handle(new ResourceRequestEvent(rsrcA,vis2,ctxt2)); local.handle(new ResourceRequestEvent(rsrcA,vis3,ctxt3)); dispatcher.await(); LocalizerEventMatcher matchesL2Req=new LocalizerEventMatcher(container2,creds2,vis2,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL2Req)); LocalizerEventMatcher matchesL3Req=new LocalizerEventMatcher(container3,creds3,vis3,LocalizerEventType.REQUEST_RESOURCE_LOCALIZATION); verify(localizerBus).handle(argThat(matchesL3Req)); Path locA=new Path("file:///cache/rsrcA"); local.handle(new ResourceLocalizedEvent(rsrcA,locA,10)); dispatcher.await(); ContainerEventMatcher matchesC2Localized=new ContainerEventMatcher(container2,ContainerEventType.RESOURCE_LOCALIZED); ContainerEventMatcher matchesC3Localized=new ContainerEventMatcher(container3,ContainerEventType.RESOURCE_LOCALIZED); verify(containerBus).handle(argThat(matchesC2Localized)); verify(containerBus).handle(argThat(matchesC3Localized)); assertEquals(ResourceState.LOCALIZED,local.getState()); final ContainerId container4=getMockContainer(4); final Credentials creds4=new Credentials(); final LocalizerContext ctxt4=new LocalizerContext("yak",container4,creds4); final LocalResourceVisibility vis4=LocalResourceVisibility.PRIVATE; local.handle(new ResourceRequestEvent(rsrcA,vis4,ctxt4)); dispatcher.await(); ContainerEventMatcher matchesC4Localized=new 
ContainerEventMatcher(container4,ContainerEventType.RESOURCE_LOCALIZED); verify(containerBus).handle(argThat(matchesC4Localized)); assertEquals(ResourceState.LOCALIZED,local.getState()); } finally { dispatcher.stop(); } }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestResourceLocalizationService

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test @SuppressWarnings("unchecked") public void testRecovery() throws Exception { final String user1="user1"; final String user2="user2"; final ApplicationId appId1=ApplicationId.newInstance(1,1); final ApplicationId appId2=ApplicationId.newInstance(1,2); List localDirs=new ArrayList(); String[] sDirs=new String[4]; for (int i=0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true); NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); DrainDispatcher dispatcher=new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler applicationBus=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher.register(ContainerEventType.class,containerBus); EventHandler localizerBus=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerBus); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); ResourceLocalizationService spyService=createSpyService(dispatcher,dirsHandler,stateStore); try { spyService.init(conf); spyService.start(); final Application app1=mock(Application.class); when(app1.getUser()).thenReturn(user1); when(app1.getAppId()).thenReturn(appId1); final Application app2=mock(Application.class); when(app2.getUser()).thenReturn(user2); when(app2.getAppId()).thenReturn(appId2); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app1)); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app2)); dispatcher.await(); LocalResourcesTracker appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1); LocalResourcesTracker 
privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null); LocalResourcesTracker appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2); LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null); final Container c1=getMockContainer(appId1,1,user1); final Container c2=getMockContainer(appId2,2,user2); Random r=new Random(); long seed=r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final LocalResource privResource1=getPrivateMockedResource(r); final LocalResourceRequest privReq1=new LocalResourceRequest(privResource1); final LocalResource privResource2=getPrivateMockedResource(r); final LocalResourceRequest privReq2=new LocalResourceRequest(privResource2); final LocalResource pubResource1=getPublicMockedResource(r); final LocalResourceRequest pubReq1=new LocalResourceRequest(pubResource1); final LocalResource pubResource2=getPublicMockedResource(r); final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2); final LocalResource appResource1=getAppMockedResource(r); final LocalResourceRequest appReq1=new LocalResourceRequest(appResource1); final LocalResource appResource2=getAppMockedResource(r); final LocalResourceRequest appReq2=new LocalResourceRequest(appResource2); final LocalResource appResource3=getAppMockedResource(r); final LocalResourceRequest appReq3=new LocalResourceRequest(appResource3); Map> req1=new HashMap>(); req1.put(LocalResourceVisibility.PRIVATE,Arrays.asList(new LocalResourceRequest[]{privReq1,privReq2})); req1.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq1)); req1.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq1)); Map> req2=new HashMap>(); req2.put(LocalResourceVisibility.APPLICATION,Arrays.asList(new LocalResourceRequest[]{appReq2,appReq3})); req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2)); 
spyService.handle(new ContainerLocalizationRequestEvent(c1,req1)); spyService.handle(new ContainerLocalizationRequestEvent(c2,req2)); dispatcher.await(); privTracker1.getPathForLocalization(privReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1)); privTracker1.getPathForLocalization(privReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.USERCACHE + user1)); LocalizedResource privLr1=privTracker1.getLocalizedResource(privReq1); LocalizedResource privLr2=privTracker1.getLocalizedResource(privReq2); appTracker1.getPathForLocalization(appReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId1)); LocalizedResource appLr1=appTracker1.getLocalizedResource(appReq1); appTracker2.getPathForLocalization(appReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2)); LocalizedResource appLr2=appTracker2.getLocalizedResource(appReq2); appTracker2.getPathForLocalization(appReq3,dirsHandler.getLocalPathForWrite(ContainerLocalizer.APPCACHE + appId2)); LocalizedResource appLr3=appTracker2.getLocalizedResource(appReq3); pubTracker.getPathForLocalization(pubReq1,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE)); LocalizedResource pubLr1=pubTracker.getLocalizedResource(pubReq1); pubTracker.getPathForLocalization(pubReq2,dirsHandler.getLocalPathForWrite(ContainerLocalizer.FILECACHE)); LocalizedResource pubLr2=pubTracker.getLocalizedResource(pubReq2); assertNotNull("Localization not started",privLr1.getLocalPath()); privTracker1.handle(new ResourceLocalizedEvent(privReq1,privLr1.getLocalPath(),privLr1.getSize() + 5)); assertNotNull("Localization not started",privLr2.getLocalPath()); privTracker1.handle(new ResourceLocalizedEvent(privReq2,privLr2.getLocalPath(),privLr2.getSize() + 10)); assertNotNull("Localization not started",appLr1.getLocalPath()); appTracker1.handle(new ResourceLocalizedEvent(appReq1,appLr1.getLocalPath(),appLr1.getSize())); assertNotNull("Localization not started",appLr3.getLocalPath()); 
appTracker2.handle(new ResourceLocalizedEvent(appReq3,appLr3.getLocalPath(),appLr3.getSize() + 7)); assertNotNull("Localization not started",pubLr1.getLocalPath()); pubTracker.handle(new ResourceLocalizedEvent(pubReq1,pubLr1.getLocalPath(),pubLr1.getSize() + 1000)); assertNotNull("Localization not started",pubLr2.getLocalPath()); pubTracker.handle(new ResourceLocalizedEvent(pubReq2,pubLr2.getLocalPath(),pubLr2.getSize() + 99999)); dispatcher.await(); assertEquals(ResourceState.LOCALIZED,privLr1.getState()); assertEquals(ResourceState.LOCALIZED,privLr2.getState()); assertEquals(ResourceState.LOCALIZED,appLr1.getState()); assertEquals(ResourceState.DOWNLOADING,appLr2.getState()); assertEquals(ResourceState.LOCALIZED,appLr3.getState()); assertEquals(ResourceState.LOCALIZED,pubLr1.getState()); assertEquals(ResourceState.LOCALIZED,pubLr2.getState()); spyService=createSpyService(dispatcher,dirsHandler,stateStore); spyService.init(conf); spyService.recoverLocalizedResources(stateStore.loadLocalizationState()); dispatcher.await(); appTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user1,appId1); privTracker1=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user1,null); appTracker2=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user2,appId2); pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,null,null); LocalizedResource recoveredRsrc=privTracker1.getLocalizedResource(privReq1); assertEquals(privReq1,recoveredRsrc.getRequest()); assertEquals(privLr1.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(privLr1.getSize(),recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); recoveredRsrc=privTracker1.getLocalizedResource(privReq2); assertEquals(privReq2,recoveredRsrc.getRequest()); assertEquals(privLr2.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(privLr2.getSize(),recoveredRsrc.getSize()); 
assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); recoveredRsrc=appTracker1.getLocalizedResource(appReq1); assertEquals(appReq1,recoveredRsrc.getRequest()); assertEquals(appLr1.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(appLr1.getSize(),recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); recoveredRsrc=appTracker2.getLocalizedResource(appReq2); assertNull("in-progress resource should not be present",recoveredRsrc); recoveredRsrc=appTracker2.getLocalizedResource(appReq3); assertEquals(appReq3,recoveredRsrc.getRequest()); assertEquals(appLr3.getLocalPath(),recoveredRsrc.getLocalPath()); assertEquals(appLr3.getSize(),recoveredRsrc.getSize()); assertEquals(ResourceState.LOCALIZED,recoveredRsrc.getState()); } finally { dispatcher.stop(); stateStore.close(); } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that two containers requesting the SAME private resource do not
     * download it concurrently. Two LocalizerRunners are registered for the
     * same LocalResourceRequest; the first heartbeat acquires the resource's
     * semaphore (lr.sem drops from 1 to 0) and gets the resource spec, while
     * the second localizer's heartbeat schedules nothing and receives an empty
     * spec list. After the first localizer reports a failed download, the
     * resource goes FAILED and the second localizer is not handed the failed
     * resource either (scheduled/pending stay empty).
     * NOTE(review): 200/100 ms wait bounds come from the waitFor* helpers
     * defined elsewhere in this class.
     */
    @Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPrivateResource() throws Exception { DrainDispatcher dispatcher1=null; try { dispatcher1=new DrainDispatcher(); String user="testuser"; ApplicationId appId=BuilderUtils.newApplicationId(1,1); List localDirs=new ArrayList(); String[] sDirs=new String[1]; for (int i=0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService(); localDirHandler.init(conf); EventHandler applicationBus=mock(EventHandler.class); dispatcher1.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher1.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); DeletionService delService=mock(DeletionService.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService()); dispatcher1.register(LocalizationEventType.class,rls); rls.init(conf); rls.handle(createApplicationLocalizationEvent(user,appId)); LocalResourceRequest req=new LocalResourceRequest(new Path("file:///tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,""); ContainerImpl container1=createMockContainer(user,1); String localizerId1=container1.getContainerId().toString(); rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1)); LocalizerRunner localizerRunner1=rls.getLocalizerRunner(localizerId1); dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PRIVATE,req)); 
Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,1,200)); ContainerImpl container2=createMockContainer(user,2); String localizerId2=container2.getContainerId().toString(); rls.getPrivateLocalizers().put(localizerId2,rls.new LocalizerRunner(new LocalizerContext(user,container2.getContainerId(),null),localizerId2)); LocalizerRunner localizerRunner2=rls.getLocalizerRunner(localizerId2); dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PRIVATE,req)); Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId2,1,200)); LocalResourcesTracker tracker=rls.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId); LocalizedResource lr=tracker.getLocalizedResource(req); Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState()); Assert.assertEquals(1,lr.sem.availablePermits()); LocalizerHeartbeatResponse response1=rls.heartbeat(createLocalizerStatus(localizerId1)); Assert.assertEquals(1,localizerRunner1.scheduled.size()); Assert.assertEquals(req.getResource(),response1.getResourceSpecs().get(0).getResource().getResource()); Assert.assertEquals(0,lr.sem.availablePermits()); LocalizerHeartbeatResponse response2=rls.heartbeat(createLocalizerStatus(localizerId2)); Assert.assertEquals(0,localizerRunner2.scheduled.size()); Assert.assertEquals(0,response2.getResourceSpecs().size()); rls.heartbeat(createLocalizerStatusForFailedResource(localizerId1,req)); Assert.assertTrue(waitForResourceState(lr,rls,req,LocalResourceVisibility.PRIVATE,user,appId,ResourceState.FAILED,200)); Assert.assertTrue(lr.getState().equals(ResourceState.FAILED)); Assert.assertEquals(0,localizerRunner1.scheduled.size()); response2=rls.heartbeat(createLocalizerStatus(localizerId2)); Assert.assertEquals(0,localizerRunner2.scheduled.size()); Assert.assertEquals(0,localizerRunner2.pending.size()); Assert.assertEquals(0,response2.getResourceSpecs().size()); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Public-resource counterpart of the parallel-download test: while one
     * container's PUBLIC download is pending, a second container's request for
     * the same resource must not start another download (the resource's
     * semaphore permit is already taken). After a
     * ResourceFailedLocalizationEvent flips the resource to FAILED and it is
     * unlocked, a replayed LocalizerResourceRequestEvent for the failed
     * resource must NOT trigger a new download, and the permit is back to 1.
     * NOTE(review): uses spy(ResourceLocalizationService) so the PublicLocalizer
     * pending queue can be inspected directly.
     */
    @Test(timeout=100000) @SuppressWarnings("unchecked") public void testParallelDownloadAttemptsForPublicResource() throws Exception { DrainDispatcher dispatcher1=null; String user="testuser"; try { List localDirs=new ArrayList(); String[] sDirs=new String[1]; for (int i=0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); EventHandler applicationBus=mock(EventHandler.class); dispatcher1=new DrainDispatcher(); dispatcher1.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher1.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); DeletionService delService=mock(DeletionService.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher1,exec,delService,dirsHandler,new NMNullStateStoreService()); ResourceLocalizationService spyService=spy(rawService); dispatcher1.register(LocalizationEventType.class,spyService); spyService.init(conf); Assert.assertEquals(0,spyService.getPublicLocalizer().pending.size()); LocalResourceRequest req=new LocalResourceRequest(new Path("/tmp"),123L,LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,""); ApplicationImpl app=mock(ApplicationImpl.class); ApplicationId appId=BuilderUtils.newApplicationId(1,1); when(app.getAppId()).thenReturn(appId); when(app.getUser()).thenReturn(user); dispatcher1.getEventHandler().handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app)); ContainerImpl container1=createMockContainer(user,1); dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container1,LocalResourceVisibility.PUBLIC,req)); 
Assert.assertTrue(waitForResourceState(null,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.DOWNLOADING,200)); Assert.assertTrue(waitForPublicDownloadToStart(spyService,1,200)); LocalizedResource lr=getLocalizedResource(spyService,req,LocalResourceVisibility.PUBLIC,user,null); Assert.assertEquals(ResourceState.DOWNLOADING,lr.getState()); Assert.assertEquals(1,spyService.getPublicLocalizer().pending.size()); Assert.assertEquals(0,lr.sem.availablePermits()); ContainerImpl container2=createMockContainer(user,2); dispatcher1.getEventHandler().handle(createContainerLocalizationEvent(container2,LocalResourceVisibility.PUBLIC,req)); Assert.assertFalse(waitForPublicDownloadToStart(spyService,2,100)); ResourceFailedLocalizationEvent locFailedEvent=new ResourceFailedLocalizationEvent(req,new Exception("test").toString()); spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,null).handle(locFailedEvent); Assert.assertTrue(waitForResourceState(lr,spyService,req,LocalResourceVisibility.PUBLIC,user,null,ResourceState.FAILED,200)); lr.unlock(); spyService.getPublicLocalizer().pending.clear(); LocalizerResourceRequestEvent localizerEvent=new LocalizerResourceRequestEvent(lr,null,mock(LocalizerContext.class),null); dispatcher1.getEventHandler().handle(localizerEvent); Assert.assertFalse(waitForPublicDownloadToStart(spyService,1,100)); Assert.assertEquals(1,lr.sem.availablePermits()); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } }

    APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalResourcePath() throws Exception { DrainDispatcher dispatcher1=null; try { dispatcher1=new DrainDispatcher(); String user="testuser"; ApplicationId appId=BuilderUtils.newApplicationId(1,1); List localDirs=new ArrayList(); String[] sDirs=new String[1]; for (int i=0; i < 1; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); LocalDirsHandlerService localDirHandler=new LocalDirsHandlerService(); localDirHandler.init(conf); EventHandler applicationBus=mock(EventHandler.class); dispatcher1.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher1.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); DeletionService delService=mock(DeletionService.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); dispatcher1.init(conf); dispatcher1.start(); ResourceLocalizationService rls=new ResourceLocalizationService(dispatcher1,exec,delService,localDirHandler,new NMNullStateStoreService()); dispatcher1.register(LocalizationEventType.class,rls); rls.init(conf); rls.handle(createApplicationLocalizationEvent(user,appId)); Container container1=createMockContainer(user,1); String localizerId1=container1.getContainerId().toString(); rls.getPrivateLocalizers().put(localizerId1,rls.new LocalizerRunner(new LocalizerContext(user,container1.getContainerId(),null),localizerId1)); LocalResourceRequest reqPriv=new LocalResourceRequest(new Path("file:///tmp1"),123L,LocalResourceType.FILE,LocalResourceVisibility.PRIVATE,""); List privList=new ArrayList(); privList.add(reqPriv); LocalResourceRequest reqApp=new LocalResourceRequest(new Path("file:///tmp2"),123L,LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,""); List appList=new ArrayList(); 
appList.add(reqApp); Map> rsrcs=new HashMap>(); rsrcs.put(LocalResourceVisibility.APPLICATION,appList); rsrcs.put(LocalResourceVisibility.PRIVATE,privList); dispatcher1.getEventHandler().handle(new ContainerLocalizationRequestEvent(container1,rsrcs)); Assert.assertTrue(waitForPrivateDownloadToStart(rls,localizerId1,2,500)); String userCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.FILECACHE)); String userAppCachePath=StringUtils.join(Path.SEPARATOR,Arrays.asList(localDirs.get(0).toUri().getRawPath(),ContainerLocalizer.USERCACHE,user,ContainerLocalizer.APPCACHE,appId.toString(),ContainerLocalizer.FILECACHE)); int returnedResources=0; boolean appRsrc=false, privRsrc=false; while (returnedResources < 2) { LocalizerHeartbeatResponse response=rls.heartbeat(createLocalizerStatus(localizerId1)); for ( ResourceLocalizationSpec resourceSpec : response.getResourceSpecs()) { returnedResources++; Path destinationDirectory=new Path(resourceSpec.getDestinationDirectory().getFile()); if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.APPLICATION) { appRsrc=true; Assert.assertEquals(userAppCachePath,destinationDirectory.getParent().toUri().toString()); } else if (resourceSpec.getResource().getVisibility() == LocalResourceVisibility.PRIVATE) { privRsrc=true; Assert.assertEquals(userCachePath,destinationDirectory.getParent().toUri().toString()); } else { throw new Exception("Unexpected resource recevied."); } } } Assert.assertTrue(appRsrc && privRsrc); } finally { if (dispatcher1 != null) { dispatcher1.stop(); } } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) @SuppressWarnings("unchecked") public void testLocalizationHeartbeat() throws Exception { List localDirs=new ArrayList(); String[] sDirs=new String[1]; localDirs.add(lfs.makeQualified(new Path(basedir,0 + ""))); sDirs[0]=localDirs.get(0).toString(); conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); conf.set(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY,"37"); DrainDispatcher dispatcher=new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler applicationBus=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher.register(ContainerEventType.class,containerBus); ContainerExecutor exec=mock(ContainerExecutor.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); DeletionService delServiceReal=new DeletionService(exec); DeletionService delService=spy(delServiceReal); delService.init(new Configuration()); delService.start(); ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService()); ResourceLocalizationService spyService=spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class)); try { spyService.init(conf); spyService.start(); final Application app=mock(Application.class); final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3); when(app.getUser()).thenReturn("user0"); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app)); ArgumentMatcher matchesAppInit=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ApplicationEvent evt=(ApplicationEvent)o; return evt.getType() == ApplicationEventType.APPLICATION_INITED && appId == evt.getApplicationID(); } } ; dispatcher.await(); 
verify(applicationBus).handle(argThat(matchesAppInit)); Random r=new Random(); long seed=r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final Container c=getMockContainer(appId,42,"user0"); FSDataOutputStream out=new FSDataOutputStream(new DataOutputBuffer(),null); doReturn(out).when(spylfs).createInternal(isA(Path.class),isA(EnumSet.class),isA(FsPermission.class),anyInt(),anyShort(),anyLong(),isA(Progressable.class),isA(ChecksumOpt.class),anyBoolean()); final LocalResource resource1=getPrivateMockedResource(r); LocalResource resource2=null; do { resource2=getPrivateMockedResource(r); } while (resource2 == null || resource2.equals(resource1)); final LocalResourceRequest req1=new LocalResourceRequest(resource1); final LocalResourceRequest req2=new LocalResourceRequest(resource2); Map> rsrcs=new HashMap>(); List privateResourceList=new ArrayList(); privateResourceList.add(req1); privateResourceList.add(req2); rsrcs.put(LocalResourceVisibility.PRIVATE,privateResourceList); spyService.handle(new ContainerLocalizationRequestEvent(c,rsrcs)); Thread.sleep(1000); dispatcher.await(); String appStr=ConverterUtils.toString(appId); String ctnrStr=c.getContainerId().toString(); ArgumentCaptor tokenPathCaptor=ArgumentCaptor.forClass(Path.class); verify(exec).startLocalizer(tokenPathCaptor.capture(),isA(InetSocketAddress.class),eq("user0"),eq(appStr),eq(ctnrStr),isA(List.class),isA(List.class)); Path localizationTokenPath=tokenPathCaptor.getValue(); LocalResourceStatus rsrcStat1=mock(LocalResourceStatus.class); LocalResourceStatus rsrcStat2=mock(LocalResourceStatus.class); LocalizerStatus stat=mock(LocalizerStatus.class); when(stat.getLocalizerId()).thenReturn(ctnrStr); when(rsrcStat1.getResource()).thenReturn(resource1); when(rsrcStat2.getResource()).thenReturn(resource2); when(rsrcStat1.getLocalSize()).thenReturn(4344L); when(rsrcStat2.getLocalSize()).thenReturn(2342L); URL locPath=getPath("/cache/private/blah"); 
when(rsrcStat1.getLocalPath()).thenReturn(locPath); when(rsrcStat2.getLocalPath()).thenReturn(locPath); when(rsrcStat1.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); when(rsrcStat2.getStatus()).thenReturn(ResourceStatusType.FETCH_SUCCESS); when(stat.getResources()).thenReturn(Collections.emptyList()).thenReturn(Collections.singletonList(rsrcStat1)).thenReturn(Collections.singletonList(rsrcStat2)).thenReturn(Collections.emptyList()); String localPath=Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR+ "user0"+ Path.SEPARATOR+ ContainerLocalizer.FILECACHE; LocalizerHeartbeatResponse response=spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE,response.getLocalizerAction()); assertEquals(1,response.getResourceSpecs().size()); assertEquals(req1,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource())); URL localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory(); assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "10")); response=spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE,response.getLocalizerAction()); assertEquals(1,response.getResourceSpecs().size()); assertEquals(req2,new LocalResourceRequest(response.getResourceSpecs().get(0).getResource())); localizedPath=response.getResourceSpecs().get(0).getDestinationDirectory(); assertTrue(localizedPath.getFile().endsWith(localPath + Path.SEPARATOR + "0"+ Path.SEPARATOR+ "11")); response=spyService.heartbeat(stat); assertEquals(LocalizerAction.LIVE,response.getLocalizerAction()); assertEquals(0,response.getResourceSpecs().size()); response=spyService.heartbeat(stat); assertEquals(LocalizerAction.DIE,response.getLocalizerAction()); dispatcher.await(); ArgumentMatcher matchesContainerLoc=new ArgumentMatcher(){ @Override public boolean matches( Object o){ ContainerEvent evt=(ContainerEvent)o; return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED && c.getContainerId() == evt.getContainerID(); } } ; 
verify(containerBus,times(2)).handle(argThat(matchesContainerLoc)); verify(delService).delete((String)isNull(),eq(localizationTokenPath)); } finally { spyService.stop(); dispatcher.stop(); delService.stop(); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies reference counting in {@code ResourceLocalizationService}: a
     * single container issues two localization requests that share the private
     * resource ({@code privReq}) and one public resource ({@code pubReq}),
     * so the private resource must reach refcount 2 while each public and
     * app-scoped resource stays at 1. After the two cleanup events the shared
     * private resource drops back to 1 and public/app resources drop to 0.
     * Also checks that the private localizer cleanup is invoked with the
     * exact container id string derived from appId 314159265358979/3 and
     * container 42.
     *
     * NOTE(review): generic type parameters appear stripped by extraction —
     * e.g. {@code Map> req=new HashMap>()} is not valid Java; presumably this
     * was {@code Map<LocalResourceVisibility,Collection<LocalResourceRequest>>}.
     * Verify against the upstream source before compiling.
     * The random seed is printed ("SEED: ...") so a failure can be reproduced.
     */
    @Test @SuppressWarnings("unchecked") public void testResourceRelease() throws Exception { List localDirs=new ArrayList(); String[] sDirs=new String[4]; for (int i=0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir,i + ""))); sDirs[i]=localDirs.get(i).toString(); } conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS,sDirs); LocalizerTracker mockLocallilzerTracker=mock(LocalizerTracker.class); DrainDispatcher dispatcher=new DrainDispatcher(); dispatcher.init(conf); dispatcher.start(); EventHandler applicationBus=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,applicationBus); EventHandler containerBus=mock(EventHandler.class); dispatcher.register(ContainerEventType.class,containerBus); EventHandler localizerBus=mock(EventHandler.class); dispatcher.register(LocalizerEventType.class,localizerBus); ContainerExecutor exec=mock(ContainerExecutor.class); LocalDirsHandlerService dirsHandler=new LocalDirsHandlerService(); dirsHandler.init(conf); DeletionService delService=new DeletionService(exec); delService.init(new Configuration()); delService.start(); ResourceLocalizationService rawService=new ResourceLocalizationService(dispatcher,exec,delService,dirsHandler,new NMNullStateStoreService()); ResourceLocalizationService spyService=spy(rawService); doReturn(mockServer).when(spyService).createServer(); doReturn(mockLocallilzerTracker).when(spyService).createLocalizerTracker(isA(Configuration.class)); doReturn(lfs).when(spyService).getLocalFileContext(isA(Configuration.class)); try { spyService.init(conf); spyService.start(); final String user="user0"; final Application app=mock(Application.class); final ApplicationId appId=BuilderUtils.newApplicationId(314159265358979L,3); when(app.getUser()).thenReturn(user); when(app.getAppId()).thenReturn(appId); spyService.handle(new ApplicationLocalizationEvent(LocalizationEventType.INIT_APPLICATION_RESOURCES,app)); dispatcher.await(); LocalResourcesTracker 
appTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.APPLICATION,user,appId); LocalResourcesTracker privTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PRIVATE,user,appId); LocalResourcesTracker pubTracker=spyService.getLocalResourcesTracker(LocalResourceVisibility.PUBLIC,user,appId); final Container c=getMockContainer(appId,42,user); Random r=new Random(); long seed=r.nextLong(); System.out.println("SEED: " + seed); r.setSeed(seed); final LocalResource privResource=getPrivateMockedResource(r); final LocalResourceRequest privReq=new LocalResourceRequest(privResource); final LocalResource pubResource=getPublicMockedResource(r); final LocalResourceRequest pubReq=new LocalResourceRequest(pubResource); final LocalResource pubResource2=getPublicMockedResource(r); final LocalResourceRequest pubReq2=new LocalResourceRequest(pubResource2); final LocalResource appResource=getAppMockedResource(r); final LocalResourceRequest appReq=new LocalResourceRequest(appResource); Map> req=new HashMap>(); req.put(LocalResourceVisibility.PRIVATE,Collections.singletonList(privReq)); req.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq)); req.put(LocalResourceVisibility.APPLICATION,Collections.singletonList(appReq)); Map> req2=new HashMap>(); req2.put(LocalResourceVisibility.PRIVATE,Collections.singletonList(privReq)); req2.put(LocalResourceVisibility.PUBLIC,Collections.singletonList(pubReq2)); Set pubRsrcs=new HashSet(); pubRsrcs.add(pubReq); pubRsrcs.add(pubReq2); spyService.handle(new ContainerLocalizationRequestEvent(c,req)); spyService.handle(new ContainerLocalizationRequestEvent(c,req2)); dispatcher.await(); int privRsrcCount=0; for ( LocalizedResource lr : privTracker) { privRsrcCount++; Assert.assertEquals("Incorrect reference count",2,lr.getRefCount()); Assert.assertEquals(privReq,lr.getRequest()); } Assert.assertEquals(1,privRsrcCount); int pubRsrcCount=0; for ( LocalizedResource lr : pubTracker) { pubRsrcCount++; 
Assert.assertEquals("Incorrect reference count",1,lr.getRefCount()); pubRsrcs.remove(lr.getRequest()); } Assert.assertEquals(0,pubRsrcs.size()); Assert.assertEquals(2,pubRsrcCount); int appRsrcCount=0; for ( LocalizedResource lr : appTracker) { appRsrcCount++; Assert.assertEquals("Incorrect reference count",1,lr.getRefCount()); Assert.assertEquals(appReq,lr.getRequest()); } Assert.assertEquals(1,appRsrcCount); spyService.handle(new ContainerLocalizationCleanupEvent(c,req)); verify(mockLocallilzerTracker).cleanupPrivLocalizers("container_314159265358979_0003_01_000042"); req2.remove(LocalResourceVisibility.PRIVATE); spyService.handle(new ContainerLocalizationCleanupEvent(c,req2)); dispatcher.await(); pubRsrcs.add(pubReq); pubRsrcs.add(pubReq2); privRsrcCount=0; for ( LocalizedResource lr : privTracker) { privRsrcCount++; Assert.assertEquals("Incorrect reference count",1,lr.getRefCount()); Assert.assertEquals(privReq,lr.getRequest()); } Assert.assertEquals(1,privRsrcCount); pubRsrcCount=0; for ( LocalizedResource lr : pubTracker) { pubRsrcCount++; Assert.assertEquals("Incorrect reference count",0,lr.getRefCount()); pubRsrcs.remove(lr.getRequest()); } Assert.assertEquals(0,pubRsrcs.size()); Assert.assertEquals(2,pubRsrcCount); appRsrcCount=0; for ( LocalizedResource lr : appTracker) { appRsrcCount++; Assert.assertEquals("Incorrect reference count",0,lr.getRefCount()); Assert.assertEquals(appReq,lr.getRequest()); } Assert.assertEquals(1,appRsrcCount); } finally { dispatcher.stop(); delService.stop(); } }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.TestLogAggregationService

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that after an application finishes and its logs are aggregated,
     * the local per-container log files (stdout/stderr/syslog) and the local
     * application log directory are deleted ({@code delSrvc.delete} is
     * verified with the app log dir path), while the aggregated remote log
     * file exists at {@code getRemoteNodeLogFileForApp}. Also checks that the
     * service closed its filesystems and emitted the
     * APPLICATION_LOG_HANDLING_INITED/FINISHED events in order
     * ({@code checkEvents(..., true, ...)}).
     */
    @Test @SuppressWarnings("unchecked") public void testLocalFileDeletionAfterUpload() throws Exception { this.delSrvc=new DeletionService(createContainerExecutor()); delSrvc=spy(delSrvc); this.delSrvc.init(conf); this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler)); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1)); app1LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(application1,1); ContainerId container11=BuilderUtils.newContainerId(appAttemptId,1); writeContainerLogs(app1LogDir,container11); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); logAggregationService.stop(); assertEquals(0,logAggregationService.getNumAggregators()); verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class)); verify(delSrvc).delete(eq(user),eq((Path)null),eq(new Path(app1LogDir.getAbsolutePath()))); delSrvc.stop(); String containerIdStr=ConverterUtils.toString(container11); File containerLogDir=new File(app1LogDir,containerIdStr); for ( String fileType : new String[]{"stdout","stderr","syslog"}) { File f=new File(containerLogDir,fileType); Assert.assertFalse("check " + f,f.exists()); } 
Assert.assertFalse(app1LogDir.exists()); Path logFilePath=logAggregationService.getRemoteNodeLogFileForApp(application1,this.user); Assert.assertTrue("Log file [" + logFilePath + "] not found",new File(logFilePath.toUri().getPath()).exists()); dispatcher.await(); ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(appAttemptId.getApplicationId(),ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)}; checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID"); dispatcher.stop(); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Runs three applications concurrently through log aggregation, each with
     * a different retention policy (ALL_CONTAINERS, APPLICATION_MASTER_ONLY,
     * AM_AND_FAILED_CONTAINERS_ONLY), interleaving container-finished events,
     * then verifies which containers' logs end up aggregated per app:
     * app1 keeps container11+container12, app2 keeps only container21 (its
     * first/AM container), app3 keeps container31 and container32 (the latter
     * exited with code 1, i.e. failed). Also verifies the three
     * LOG_HANDLING_INITED events (unordered) and, after stop, the three
     * LOG_HANDLING_FINISHED events.
     */
    @Test @SuppressWarnings("unchecked") public void testMultipleAppsLogAggregation() throws Exception { this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1)); app1LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(application1,1); ContainerId container11=BuilderUtils.newContainerId(appAttemptId1,1); writeContainerLogs(app1LogDir,container11); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container11,0)); ApplicationId application2=BuilderUtils.newApplicationId(1234,2); ApplicationAttemptId appAttemptId2=BuilderUtils.newApplicationAttemptId(application2,1); File app2LogDir=new File(localLogDir,ConverterUtils.toString(application2)); app2LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application2,this.user,null,ContainerLogsRetentionPolicy.APPLICATION_MASTER_ONLY,this.acls)); ContainerId container21=BuilderUtils.newContainerId(appAttemptId2,1); writeContainerLogs(app2LogDir,container21); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container21,0)); ContainerId container12=BuilderUtils.newContainerId(appAttemptId1,2); writeContainerLogs(app1LogDir,container12); logAggregationService.handle(new 
LogHandlerContainerFinishedEvent(container12,0)); ApplicationId application3=BuilderUtils.newApplicationId(1234,3); ApplicationAttemptId appAttemptId3=BuilderUtils.newApplicationAttemptId(application3,1); File app3LogDir=new File(localLogDir,ConverterUtils.toString(application3)); app3LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application3,this.user,null,ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,this.acls)); dispatcher.await(); ApplicationEvent expectedInitEvents[]=new ApplicationEvent[]{new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(application2,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(application3,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED)}; checkEvents(appEventHandler,expectedInitEvents,false,"getType","getApplicationID"); reset(appEventHandler); ContainerId container31=BuilderUtils.newContainerId(appAttemptId3,1); writeContainerLogs(app3LogDir,container31); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container31,0)); ContainerId container32=BuilderUtils.newContainerId(appAttemptId3,2); writeContainerLogs(app3LogDir,container32); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container32,1)); ContainerId container22=BuilderUtils.newContainerId(appAttemptId2,2); writeContainerLogs(app2LogDir,container22); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container22,0)); ContainerId container33=BuilderUtils.newContainerId(appAttemptId3,3); writeContainerLogs(app3LogDir,container33); logAggregationService.handle(new LogHandlerContainerFinishedEvent(container33,0)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application2)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application3)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); logAggregationService.stop(); 
assertEquals(0,logAggregationService.getNumAggregators()); verifyContainerLogs(logAggregationService,application1,new ContainerId[]{container11,container12}); verifyContainerLogs(logAggregationService,application2,new ContainerId[]{container21}); verifyContainerLogs(logAggregationService,application3,new ContainerId[]{container31,container32}); dispatcher.await(); ApplicationEvent[] expectedFinishedEvents=new ApplicationEvent[]{new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),new ApplicationEvent(application2,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),new ApplicationEvent(application3,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)}; checkEvents(appEventHandler,expectedFinishedEvents,false,"getType","getApplicationID"); dispatcher.stop(); }

    InternalCallVerifier EqualityVerifier 
    @Test @SuppressWarnings("unchecked") public void testLogAggregationCreateDirsFailsWithoutKillingNM() throws Exception { this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=spy(new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler)); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId appId=BuilderUtils.newApplicationId(System.currentTimeMillis(),(int)Math.random()); Exception e=new RuntimeException("KABOOM!"); doThrow(e).when(logAggregationService).createAppDir(any(String.class),any(ApplicationId.class),any(UserGroupInformation.class)); logAggregationService.handle(new LogHandlerAppStartedEvent(appId,this.user,null,ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY,this.acls)); dispatcher.await(); ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(appId,ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED)}; checkEvents(appEventHandler,expectedEvents,false,"getType","getApplicationID","getDiagnostic"); verify(logAggregationService).closeFileSystems(any(UserGroupInformation.class)); logAggregationService.handle(new LogHandlerContainerFinishedEvent(BuilderUtils.newContainerId(4,1,1,1),0)); dispatcher.await(); logAggregationService.handle(new LogHandlerAppFinishedEvent(BuilderUtils.newApplicationId(1,5))); dispatcher.await(); logAggregationService.stop(); assertEquals(0,logAggregationService.getNumAggregators()); }

    InternalCallVerifier EqualityVerifier 
    @Test @SuppressWarnings("unchecked") public void testLogAggregatorCleanup() throws Exception { DeletionService delSrvc=mock(DeletionService.class); LocalDirsHandlerService mockedDirSvc=mock(LocalDirsHandlerService.class); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=new LogAggregationService(dispatcher,this.context,delSrvc,mockedDirSvc); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); dispatcher.await(); int timeToWait=20 * 1000; while (timeToWait > 0 && logAggregationService.getNumAggregators() > 0) { Thread.sleep(100); timeToWait-=100; } Assert.assertEquals("Log aggregator failed to cleanup!",0,logAggregationService.getNumAggregators()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test @SuppressWarnings("unchecked") public void testNoContainerOnNode() throws Exception { this.conf.set(YarnConfiguration.NM_LOG_DIRS,localLogDir.getAbsolutePath()); this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,this.remoteRootLogDir.getAbsolutePath()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=new LogAggregationService(dispatcher,this.context,this.delSrvc,super.dirsHandler); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); File app1LogDir=new File(localLogDir,ConverterUtils.toString(application1)); app1LogDir.mkdir(); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); logAggregationService.handle(new LogHandlerAppFinishedEvent(application1)); logAggregationService.stop(); assertEquals(0,logAggregationService.getNumAggregators()); Assert.assertFalse(new File(logAggregationService.getRemoteNodeLogFileForApp(application1,this.user).toUri().getPath()).exists()); dispatcher.await(); ApplicationEvent expectedEvents[]=new ApplicationEvent[]{new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),new ApplicationEvent(application1,ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED)}; checkEvents(appEventHandler,expectedEvents,true,"getType","getApplicationID"); dispatcher.stop(); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=20000) @SuppressWarnings("unchecked") public void testStopAfterError() throws Exception { DeletionService delSrvc=mock(DeletionService.class); LocalDirsHandlerService mockedDirSvc=mock(LocalDirsHandlerService.class); when(mockedDirSvc.getLogDirs()).thenThrow(new RuntimeException()); DrainDispatcher dispatcher=createDispatcher(); EventHandler appEventHandler=mock(EventHandler.class); dispatcher.register(ApplicationEventType.class,appEventHandler); LogAggregationService logAggregationService=new LogAggregationService(dispatcher,this.context,delSrvc,mockedDirSvc); logAggregationService.init(this.conf); logAggregationService.start(); ApplicationId application1=BuilderUtils.newApplicationId(1234,1); logAggregationService.handle(new LogHandlerAppStartedEvent(application1,this.user,null,ContainerLogsRetentionPolicy.ALL_CONTAINERS,this.acls)); logAggregationService.stop(); assertEquals(0,logAggregationService.getNumAggregators()); }

    Class: org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.TestContainersMonitor

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=20000) public void testContainerMonitorMemFlags(){ ContainersMonitor cm=null; long expPmem=8192 * 1024 * 1024l; long expVmem=(long)(expPmem * 2.1f); cm=new ContainersMonitorImpl(mock(ContainerExecutor.class),mock(AsyncDispatcher.class),mock(Context.class)); cm.init(getConfForCM(false,false,8192,2.1f)); assertEquals(expPmem,cm.getPmemAllocatedForContainers()); assertEquals(expVmem,cm.getVmemAllocatedForContainers()); assertEquals(false,cm.isPmemCheckEnabled()); assertEquals(false,cm.isVmemCheckEnabled()); cm=new ContainersMonitorImpl(mock(ContainerExecutor.class),mock(AsyncDispatcher.class),mock(Context.class)); cm.init(getConfForCM(true,false,8192,2.1f)); assertEquals(expPmem,cm.getPmemAllocatedForContainers()); assertEquals(expVmem,cm.getVmemAllocatedForContainers()); assertEquals(true,cm.isPmemCheckEnabled()); assertEquals(false,cm.isVmemCheckEnabled()); cm=new ContainersMonitorImpl(mock(ContainerExecutor.class),mock(AsyncDispatcher.class),mock(Context.class)); cm.init(getConfForCM(true,true,8192,2.1f)); assertEquals(expPmem,cm.getPmemAllocatedForContainers()); assertEquals(expVmem,cm.getVmemAllocatedForContainers()); assertEquals(true,cm.isPmemCheckEnabled()); assertEquals(true,cm.isVmemCheckEnabled()); cm=new ContainersMonitorImpl(mock(ContainerExecutor.class),mock(AsyncDispatcher.class),mock(Context.class)); cm.init(getConfForCM(false,true,8192,2.1f)); assertEquals(expPmem,cm.getPmemAllocatedForContainers()); assertEquals(expVmem,cm.getVmemAllocatedForContainers()); assertEquals(false,cm.isPmemCheckEnabled()); assertEquals(true,cm.isVmemCheckEnabled()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Launches a real bash script in a container with a deliberately tiny
     * memory allowance ({@code BuilderUtils.newResource(8 * 1024 * 1024, 1)})
     * and verifies the monitor kills it for exceeding the virtual memory
     * limit: exit status KILLED_EXCEEDED_VMEM, diagnostics matching the
     * "is running beyond virtual memory limits" message pattern (including
     * the live pid read back from the script's start file), and the process
     * actually dead (signalContainer with Signal.NULL returns false).
     * Skipped entirely when ProcfsBasedProcessTree is unavailable (non-Linux).
     * NOTE(review): the expected-message regex embeds a literal newline
     * inside the string constant — preserve it exactly; it must match the
     * diagnostics produced by the monitor.
     */
    @Test public void testContainerKillOnMemoryOverflow() throws IOException, InterruptedException, YarnException { if (!ProcfsBasedProcessTree.isAvailable()) { return; } containerManager.start(); File scriptFile=new File(tmpDir,"scriptFile.sh"); PrintWriter fileWriter=new PrintWriter(scriptFile); File processStartFile=new File(tmpDir,"start_file.txt").getAbsoluteFile(); fileWriter.write("\numask 0"); fileWriter.write("\necho Hello World! > " + processStartFile); fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nsleep 15"); fileWriter.close(); ContainerLaunchContext containerLaunchContext=recordFactory.newRecordInstance(ContainerLaunchContext.class); ApplicationId appId=ApplicationId.newInstance(0,0); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,1); ContainerId cId=ContainerId.newInstance(appAttemptId,0); int port=12345; URL resource_alpha=ConverterUtils.getYarnUrlFromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath()))); LocalResource rsrc_alpha=recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile="dest_file"; Map localResources=new HashMap(); localResources.put(destinationFile,rsrc_alpha); containerLaunchContext.setLocalResources(localResources); List commands=new ArrayList(); commands.add("/bin/bash"); commands.add(scriptFile.getAbsolutePath()); containerLaunchContext.setCommands(commands); Resource r=BuilderUtils.newResource(8 * 1024 * 1024,1); ContainerTokenIdentifier containerIdentifier=new ContainerTokenIdentifier(cId,context.getNodeId().toString(),user,r,System.currentTimeMillis() + 120000,123,DUMMY_RM_IDENTIFIER,Priority.newInstance(0),0); Token 
containerToken=BuilderUtils.newContainerToken(context.getNodeId(),containerManager.getContext().getContainerTokenSecretManager().createPassword(containerIdentifier),containerIdentifier); StartContainerRequest scRequest=StartContainerRequest.newInstance(containerLaunchContext,containerToken); List list=new ArrayList(); list.add(scRequest); StartContainersRequest allRequests=StartContainersRequest.newInstance(list); containerManager.startContainers(allRequests); int timeoutSecs=0; while (!processStartFile.exists() && timeoutSecs++ < 20) { Thread.sleep(1000); LOG.info("Waiting for process start-file to be created"); } Assert.assertTrue("ProcessStartFile doesn't exist!",processStartFile.exists()); BufferedReader reader=new BufferedReader(new FileReader(processStartFile)); Assert.assertEquals("Hello World!",reader.readLine()); String pid=reader.readLine().trim(); Assert.assertEquals(null,reader.readLine()); BaseContainerManagerTest.waitForContainerState(containerManager,cId,ContainerState.COMPLETE,60); List containerIds=new ArrayList(); containerIds.add(cId); GetContainerStatusesRequest gcsRequest=GetContainerStatusesRequest.newInstance(containerIds); ContainerStatus containerStatus=containerManager.getContainerStatuses(gcsRequest).getContainerStatuses().get(0); Assert.assertEquals(ContainerExitStatus.KILLED_EXCEEDED_VMEM,containerStatus.getExitStatus()); String expectedMsgPattern="Container \\[pid=" + pid + ",containerID="+ cId+ "\\] is running beyond virtual memory limits. Current usage: "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B physical memory used; "+ "[0-9.]+ ?[KMGTPE]?B of [0-9.]+ ?[KMGTPE]?B virtual memory used. 
"+ "Killing container.\nDump of the process-tree for "+ cId+ " :\n"; Pattern pat=Pattern.compile(expectedMsgPattern); Assert.assertEquals("Expected message pattern is: " + expectedMsgPattern + "\n\nObserved message is: "+ containerStatus.getDiagnostics(),true,pat.matcher(containerStatus.getDiagnostics()).find()); Assert.assertFalse("Process is still alive!",exec.signalContainer(user,pid,Signal.NULL)); }

    Class: org.apache.hadoop.yarn.server.nodemanager.recovery.TestNMLeveldbStateStoreService

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises removal of localized resources from the NM leveldb state
     * store across all three visibilities: an app resource removed after
     * finishing, an app resource removed while in progress (both leave the
     * store empty after restart), one of two finished public resources
     * removed (the survivor, pubLocalizedProto1, must be the only localized
     * public entry after restart), and an in-progress private resource
     * removed (user resource map must come back empty).
     */
    @Test public void testRemoveLocalizedResource() throws IOException { String user="somebody"; ApplicationId appId=ApplicationId.newInstance(1,1); Path appRsrcPath=new Path("hdfs://some/app/resource"); LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L); LocalResourceProto appRsrcProto=rsrcPb.getProto(); Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc"); stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath); LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build(); stateStore.finishResourceLocalization(user,appId,appLocalizedProto); stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath); restartStateStore(); verifyEmptyState(); stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath); stateStore.removeLocalizedResource(user,appId,appRsrcLocalPath); restartStateStore(); verifyEmptyState(); Path pubRsrcPath1=new Path("hdfs://some/public/resource1"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto1=rsrcPb.getProto(); Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1"); stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1); LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(789L).build(); stateStore.finishResourceLocalization(null,null,pubLocalizedProto1); Path pubRsrcPath2=new Path("hdfs://some/public/resource2"); 
rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto2=rsrcPb.getProto(); Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2"); stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2); LocalizedResourceProto pubLocalizedProto2=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto2).setLocalPath(pubRsrcLocalPath2.toString()).setSize(7654321L).build(); stateStore.finishResourceLocalization(null,null,pubLocalizedProto2); stateStore.removeLocalizedResource(null,null,pubRsrcLocalPath2); Path privRsrcPath=new Path("hdfs://some/private/resource"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*"); LocalResourceProto privRsrcProto=rsrcPb.getProto(); Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc"); stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath); stateStore.removeLocalizedResource(user,null,privRsrcLocalPath); restartStateStore(); RecoveredLocalizationState state=stateStore.loadLocalizationState(); LocalResourceTrackerState pubts=state.getPublicTrackerState(); assertTrue(pubts.getInProgressResources().isEmpty()); assertEquals(1,pubts.getLocalizedResources().size()); assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next()); Map userResources=state.getUserResources(); assertTrue(userResources.isEmpty()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testApplicationStorage() throws IOException { RecoveredApplicationsState state=stateStore.loadApplicationsState(); assertTrue(state.getApplications().isEmpty()); assertTrue(state.getFinishedApplications().isEmpty()); final ApplicationId appId1=ApplicationId.newInstance(1234,1); ContainerManagerApplicationProto.Builder builder=ContainerManagerApplicationProto.newBuilder(); builder.setId(((ApplicationIdPBImpl)appId1).getProto()); builder.setUser("user1"); ContainerManagerApplicationProto appProto1=builder.build(); stateStore.storeApplication(appId1,appProto1); restartStateStore(); state=stateStore.loadApplicationsState(); assertEquals(1,state.getApplications().size()); assertEquals(appProto1,state.getApplications().get(0)); assertTrue(state.getFinishedApplications().isEmpty()); stateStore.storeFinishedApplication(appId1); final ApplicationId appId2=ApplicationId.newInstance(1234,2); builder=ContainerManagerApplicationProto.newBuilder(); builder.setId(((ApplicationIdPBImpl)appId2).getProto()); builder.setUser("user2"); ContainerManagerApplicationProto appProto2=builder.build(); stateStore.storeApplication(appId2,appProto2); restartStateStore(); state=stateStore.loadApplicationsState(); assertEquals(2,state.getApplications().size()); assertTrue(state.getApplications().contains(appProto1)); assertTrue(state.getApplications().contains(appProto2)); assertEquals(1,state.getFinishedApplications().size()); assertEquals(appId1,state.getFinishedApplications().get(0)); stateStore.storeFinishedApplication(appId2); stateStore.removeApplication(appId2); restartStateStore(); state=stateStore.loadApplicationsState(); assertEquals(1,state.getApplications().size()); assertEquals(appProto1,state.getApplications().get(0)); assertEquals(1,state.getFinishedApplications().size()); assertEquals(appId1,state.getFinishedApplications().get(0)); }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Round-trips NM token master keys through the state store across
     * restarts: current key, previous key, and per-application-attempt keys.
     * Then simulates a rotation — remove attempt1's key, overwrite attempt2's
     * key with the old previous key, promote current to previous and store a
     * fresh current — and verifies the recovered state reflects exactly that
     * (attempt1 absent, attempt2 and attempt3 present with expected keys).
     */
    @Test public void testNMTokenStorage() throws IOException { RecoveredNMTokensState state=stateStore.loadNMTokensState(); assertNull(state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); NMTokenSecretManagerForTest secretMgr=new NMTokenSecretManagerForTest(); MasterKey currentKey=secretMgr.generateKey(); stateStore.storeNMTokenCurrentMasterKey(currentKey); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); MasterKey prevKey=secretMgr.generateKey(); stateStore.storeNMTokenPreviousMasterKey(prevKey); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); assertTrue(state.getApplicationMasterKeys().isEmpty()); ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1); MasterKey attemptKey1=secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt1,attemptKey1); ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,3),4); MasterKey attemptKey2=secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); Map loadedAppKeys=state.getApplicationMasterKeys(); assertEquals(2,loadedAppKeys.size()); assertEquals(attemptKey1,loadedAppKeys.get(attempt1)); assertEquals(attemptKey2,loadedAppKeys.get(attempt2)); ApplicationAttemptId attempt3=ApplicationAttemptId.newInstance(ApplicationId.newInstance(5,6),7); MasterKey attemptKey3=secretMgr.generateKey(); stateStore.storeNMTokenApplicationMasterKey(attempt3,attemptKey3); 
stateStore.removeNMTokenApplicationMasterKey(attempt1); attemptKey2=prevKey; stateStore.storeNMTokenApplicationMasterKey(attempt2,attemptKey2); prevKey=currentKey; stateStore.storeNMTokenPreviousMasterKey(prevKey); currentKey=secretMgr.generateKey(); stateStore.storeNMTokenCurrentMasterKey(currentKey); restartStateStore(); state=stateStore.loadNMTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); loadedAppKeys=state.getApplicationMasterKeys(); assertEquals(2,loadedAppKeys.size()); assertNull(loadedAppKeys.get(attempt1)); assertEquals(attemptKey2,loadedAppKeys.get(attempt2)); assertEquals(attemptKey3,loadedAppKeys.get(attempt3)); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Walks one container through its full recovery lifecycle in the state
     * store, restarting the store at each step: REQUESTED (exit INVALID, not
     * killed, original StartContainerRequest preserved, empty diagnostics) →
     * LAUNCHED with diagnostics → killed (status stays LAUNCHED, killed flag
     * set) → COMPLETED with exit code 21 → removed (store empty again).
     * Diagnostics are cumulative: each storeContainerDiagnostics call writes
     * the full StringBuilder contents, which must be recovered verbatim.
     */
    @Test public void testContainerStorage() throws IOException { List recoveredContainers=stateStore.loadContainersState(); assertTrue(recoveredContainers.isEmpty()); ApplicationId appId=ApplicationId.newInstance(1234,3); ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(appId,4); ContainerId containerId=ContainerId.newInstance(appAttemptId,5); LocalResource lrsrc=LocalResource.newInstance(URL.newInstance("hdfs","somehost",12345,"/some/path/to/rsrc"),LocalResourceType.FILE,LocalResourceVisibility.APPLICATION,123L,1234567890L); Map localResources=new HashMap(); localResources.put("rsrc",lrsrc); Map env=new HashMap(); env.put("somevar","someval"); List containerCmds=new ArrayList(); containerCmds.add("somecmd"); containerCmds.add("somearg"); Map serviceData=new HashMap(); serviceData.put("someservice",ByteBuffer.wrap(new byte[]{0x1,0x2,0x3})); ByteBuffer containerTokens=ByteBuffer.wrap(new byte[]{0x7,0x8,0x9,0xa}); Map acls=new HashMap(); acls.put(ApplicationAccessType.VIEW_APP,"viewuser"); acls.put(ApplicationAccessType.MODIFY_APP,"moduser"); ContainerLaunchContext clc=ContainerLaunchContext.newInstance(localResources,env,containerCmds,serviceData,containerTokens,acls); Resource containerRsrc=Resource.newInstance(1357,3); ContainerTokenIdentifier containerTokenId=new ContainerTokenIdentifier(containerId,"host","user",containerRsrc,9876543210L,42,2468,Priority.newInstance(7),13579); Token containerToken=Token.newInstance(containerTokenId.getBytes(),ContainerTokenIdentifier.KIND.toString(),"password".getBytes(),"tokenservice"); StartContainerRequest containerReq=StartContainerRequest.newInstance(clc,containerToken); stateStore.storeContainer(containerId,containerReq); restartStateStore(); recoveredContainers=stateStore.loadContainersState(); assertEquals(1,recoveredContainers.size()); RecoveredContainerState rcs=recoveredContainers.get(0); assertEquals(RecoveredContainerStatus.REQUESTED,rcs.getStatus()); 
assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode()); assertEquals(false,rcs.getKilled()); assertEquals(containerReq,rcs.getStartRequest()); assertTrue(rcs.getDiagnostics().isEmpty()); StringBuilder diags=new StringBuilder(); stateStore.storeContainerLaunched(containerId); diags.append("some diags for container"); stateStore.storeContainerDiagnostics(containerId,diags); restartStateStore(); recoveredContainers=stateStore.loadContainersState(); assertEquals(1,recoveredContainers.size()); rcs=recoveredContainers.get(0); assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus()); assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode()); assertEquals(false,rcs.getKilled()); assertEquals(containerReq,rcs.getStartRequest()); assertEquals(diags.toString(),rcs.getDiagnostics()); diags.append("some more diags for container"); stateStore.storeContainerDiagnostics(containerId,diags); stateStore.storeContainerKilled(containerId); restartStateStore(); recoveredContainers=stateStore.loadContainersState(); assertEquals(1,recoveredContainers.size()); rcs=recoveredContainers.get(0); assertEquals(RecoveredContainerStatus.LAUNCHED,rcs.getStatus()); assertEquals(ContainerExitStatus.INVALID,rcs.getExitCode()); assertTrue(rcs.getKilled()); assertEquals(containerReq,rcs.getStartRequest()); assertEquals(diags.toString(),rcs.getDiagnostics()); diags.append("some final diags"); stateStore.storeContainerDiagnostics(containerId,diags); stateStore.storeContainerCompleted(containerId,21); restartStateStore(); recoveredContainers=stateStore.loadContainersState(); assertEquals(1,recoveredContainers.size()); rcs=recoveredContainers.get(0); assertEquals(RecoveredContainerStatus.COMPLETED,rcs.getStatus()); assertEquals(21,rcs.getExitCode()); assertTrue(rcs.getKilled()); assertEquals(containerReq,rcs.getStartRequest()); assertEquals(diags.toString(),rcs.getDiagnostics()); stateStore.removeContainer(containerId); restartStateStore(); recoveredContainers=stateStore.loadContainersState(); 
assertTrue(recoveredContainers.isEmpty()); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies state-store schema version handling across restarts: the default
     * version is loaded initially; a stored compatible version (same major,
     * minor + 2) is accepted but replaced by the current version after restart;
     * a stored incompatible version (major + 1) makes restart fail with a
     * ServiceStateException whose message names the incompatible NM state.
     */
    @Test public void testCheckVersion() throws IOException { Version defaultVersion=stateStore.getCurrentVersion(); Assert.assertEquals(defaultVersion,stateStore.loadVersion()); Version compatibleVersion=Version.newInstance(defaultVersion.getMajorVersion(),defaultVersion.getMinorVersion() + 2); stateStore.storeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion,stateStore.loadVersion()); restartStateStore(); Assert.assertEquals(defaultVersion,stateStore.loadVersion()); Version incompatibleVersion=Version.newInstance(defaultVersion.getMajorVersion() + 1,defaultVersion.getMinorVersion()); stateStore.storeVersion(incompatibleVersion); try { restartStateStore(); Assert.fail("Incompatible version, should expect fail here."); } catch ( ServiceStateException e) { Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for NM state:")); } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Round-trips DeletionService tasks through the state store across
     * restarts: starts empty, stores one full task (basedirs + successor ids)
     * and one minimal task, and after each store/remove + restart checks that
     * exactly the expected task protos are recovered, ending empty again.
     */
    @Test public void testDeletionTaskStorage() throws IOException { RecoveredDeletionServiceState state=stateStore.loadDeletionServiceState(); assertTrue(state.getTasks().isEmpty()); DeletionServiceDeleteTaskProto proto=DeletionServiceDeleteTaskProto.newBuilder().setId(7).setUser("someuser").setSubdir("some/subdir").addBasedirs("some/dir/path").addBasedirs("some/other/dir/path").setDeletionTime(123456L).addSuccessorIds(8).addSuccessorIds(9).build(); stateStore.storeDeletionTask(proto.getId(),proto); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertEquals(1,state.getTasks().size()); assertEquals(proto,state.getTasks().get(0)); DeletionServiceDeleteTaskProto proto2=DeletionServiceDeleteTaskProto.newBuilder().setId(8).setUser("user2").setSubdir("subdir2").setDeletionTime(789L).build(); stateStore.storeDeletionTask(proto2.getId(),proto2); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertEquals(2,state.getTasks().size()); assertTrue(state.getTasks().contains(proto)); assertTrue(state.getTasks().contains(proto2)); stateStore.removeDeletionTask(proto2.getId()); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertEquals(1,state.getTasks().size()); assertEquals(proto,state.getTasks().get(0)); stateStore.removeDeletionTask(proto.getId()); restartStateStore(); state=stateStore.loadDeletionServiceState(); assertTrue(state.getTasks().isEmpty()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that in-progress (started but not finished) resource
     * localizations survive a state-store restart. Starts an app-level
     * localization, then two public and one private localization, and after
     * each restart checks that every in-progress resource is recovered under
     * the correct tracker (public, per-user private, per-app) with its local
     * path, while all localized-resource lists stay empty.
     */
    @Test public void testStartResourceLocalization() throws IOException { String user="somebody"; ApplicationId appId=ApplicationId.newInstance(1,1); Path appRsrcPath=new Path("hdfs://some/app/resource"); LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L); LocalResourceProto appRsrcProto=rsrcPb.getProto(); Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc"); stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath); restartStateStore(); RecoveredLocalizationState state=stateStore.loadLocalizationState(); LocalResourceTrackerState pubts=state.getPublicTrackerState(); assertTrue(pubts.getLocalizedResources().isEmpty()); assertTrue(pubts.getInProgressResources().isEmpty()); Map userResources=state.getUserResources(); assertEquals(1,userResources.size()); RecoveredUserResources rur=userResources.get(user); LocalResourceTrackerState privts=rur.getPrivateTrackerState(); assertNotNull(privts); assertTrue(privts.getLocalizedResources().isEmpty()); assertTrue(privts.getInProgressResources().isEmpty()); assertEquals(1,rur.getAppTrackerStates().size()); LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId); assertNotNull(appts); assertTrue(appts.getLocalizedResources().isEmpty()); assertEquals(1,appts.getInProgressResources().size()); assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto)); Path pubRsrcPath1=new Path("hdfs://some/public/resource1"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto1=rsrcPb.getProto(); Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1"); stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1); Path pubRsrcPath2=new 
Path("hdfs://some/public/resource2"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto2=rsrcPb.getProto(); Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2"); stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2); Path privRsrcPath=new Path("hdfs://some/private/resource"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*"); LocalResourceProto privRsrcProto=rsrcPb.getProto(); Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc"); stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath); restartStateStore(); state=stateStore.loadLocalizationState(); pubts=state.getPublicTrackerState(); assertTrue(pubts.getLocalizedResources().isEmpty()); assertEquals(2,pubts.getInProgressResources().size()); assertEquals(pubRsrcLocalPath1,pubts.getInProgressResources().get(pubRsrcProto1)); assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2)); userResources=state.getUserResources(); assertEquals(1,userResources.size()); rur=userResources.get(user); privts=rur.getPrivateTrackerState(); assertNotNull(privts); assertTrue(privts.getLocalizedResources().isEmpty()); assertEquals(1,privts.getInProgressResources().size()); assertEquals(privRsrcLocalPath,privts.getInProgressResources().get(privRsrcProto)); assertEquals(1,rur.getAppTrackerStates().size()); appts=rur.getAppTrackerStates().get(appId); assertNotNull(appts); assertTrue(appts.getLocalizedResources().isEmpty()); assertEquals(1,appts.getInProgressResources().size()); assertEquals(appRsrcLocalPath,appts.getInProgressResources().get(appRsrcProto)); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that completed localizations survive a state-store restart.
     * Finishes the app-level resource, then one public and the private
     * resource (leaving one public resource still in progress), and after each
     * restart checks that finished resources appear in the localized list of
     * the correct tracker (public, per-user private, per-app) while the
     * unfinished public resource stays in the in-progress map.
     */
    @Test public void testFinishResourceLocalization() throws IOException { String user="somebody"; ApplicationId appId=ApplicationId.newInstance(1,1); Path appRsrcPath=new Path("hdfs://some/app/resource"); LocalResourcePBImpl rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(appRsrcPath),LocalResourceType.ARCHIVE,LocalResourceVisibility.APPLICATION,123L,456L); LocalResourceProto appRsrcProto=rsrcPb.getProto(); Path appRsrcLocalPath=new Path("/some/local/dir/for/apprsrc"); stateStore.startResourceLocalization(user,appId,appRsrcProto,appRsrcLocalPath); LocalizedResourceProto appLocalizedProto=LocalizedResourceProto.newBuilder().setResource(appRsrcProto).setLocalPath(appRsrcLocalPath.toString()).setSize(1234567L).build(); stateStore.finishResourceLocalization(user,appId,appLocalizedProto); restartStateStore(); RecoveredLocalizationState state=stateStore.loadLocalizationState(); LocalResourceTrackerState pubts=state.getPublicTrackerState(); assertTrue(pubts.getLocalizedResources().isEmpty()); assertTrue(pubts.getInProgressResources().isEmpty()); Map userResources=state.getUserResources(); assertEquals(1,userResources.size()); RecoveredUserResources rur=userResources.get(user); LocalResourceTrackerState privts=rur.getPrivateTrackerState(); assertNotNull(privts); assertTrue(privts.getLocalizedResources().isEmpty()); assertTrue(privts.getInProgressResources().isEmpty()); assertEquals(1,rur.getAppTrackerStates().size()); LocalResourceTrackerState appts=rur.getAppTrackerStates().get(appId); assertNotNull(appts); assertTrue(appts.getInProgressResources().isEmpty()); assertEquals(1,appts.getLocalizedResources().size()); assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next()); Path pubRsrcPath1=new Path("hdfs://some/public/resource1"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath1),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); 
LocalResourceProto pubRsrcProto1=rsrcPb.getProto(); Path pubRsrcLocalPath1=new Path("/some/local/dir/for/pubrsrc1"); stateStore.startResourceLocalization(null,null,pubRsrcProto1,pubRsrcLocalPath1); Path pubRsrcPath2=new Path("hdfs://some/public/resource2"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(pubRsrcPath2),LocalResourceType.FILE,LocalResourceVisibility.PUBLIC,789L,135L); LocalResourceProto pubRsrcProto2=rsrcPb.getProto(); Path pubRsrcLocalPath2=new Path("/some/local/dir/for/pubrsrc2"); stateStore.startResourceLocalization(null,null,pubRsrcProto2,pubRsrcLocalPath2); Path privRsrcPath=new Path("hdfs://some/private/resource"); rsrcPb=(LocalResourcePBImpl)LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(privRsrcPath),LocalResourceType.PATTERN,LocalResourceVisibility.PRIVATE,789L,680L,"*pattern*"); LocalResourceProto privRsrcProto=rsrcPb.getProto(); Path privRsrcLocalPath=new Path("/some/local/dir/for/privrsrc"); stateStore.startResourceLocalization(user,null,privRsrcProto,privRsrcLocalPath); LocalizedResourceProto pubLocalizedProto1=LocalizedResourceProto.newBuilder().setResource(pubRsrcProto1).setLocalPath(pubRsrcLocalPath1.toString()).setSize(pubRsrcProto1.getSize()).build(); stateStore.finishResourceLocalization(null,null,pubLocalizedProto1); LocalizedResourceProto privLocalizedProto=LocalizedResourceProto.newBuilder().setResource(privRsrcProto).setLocalPath(privRsrcLocalPath.toString()).setSize(privRsrcProto.getSize()).build(); stateStore.finishResourceLocalization(user,null,privLocalizedProto); restartStateStore(); state=stateStore.loadLocalizationState(); pubts=state.getPublicTrackerState(); assertEquals(1,pubts.getLocalizedResources().size()); assertEquals(pubLocalizedProto1,pubts.getLocalizedResources().iterator().next()); assertEquals(1,pubts.getInProgressResources().size()); assertEquals(pubRsrcLocalPath2,pubts.getInProgressResources().get(pubRsrcProto2)); userResources=state.getUserResources(); 
assertEquals(1,userResources.size()); rur=userResources.get(user); privts=rur.getPrivateTrackerState(); assertNotNull(privts); assertEquals(1,privts.getLocalizedResources().size()); assertEquals(privLocalizedProto,privts.getLocalizedResources().iterator().next()); assertTrue(privts.getInProgressResources().isEmpty()); assertEquals(1,rur.getAppTrackerStates().size()); appts=rur.getAppTrackerStates().get(appId); assertNotNull(appts); assertTrue(appts.getInProgressResources().isEmpty()); assertEquals(1,appts.getLocalizedResources().size()); assertEquals(appLocalizedProto,appts.getLocalizedResources().iterator().next()); }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies persistence of container-token state across restarts: current
     * and previous master keys are stored and recovered; active container
     * tokens (cid -> expiration time) survive restart; removing a token,
     * updating an expiration, adding a new token, and rolling the master keys
     * are all reflected correctly in the state loaded after the next restart.
     */
    @Test public void testContainerTokenStorage() throws IOException { RecoveredContainerTokensState state=stateStore.loadContainerTokensState(); assertNull(state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getActiveTokens().isEmpty()); ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(new YarnConfiguration()); MasterKey currentKey=keygen.generateKey(); stateStore.storeContainerTokenCurrentMasterKey(currentKey); restartStateStore(); state=stateStore.loadContainerTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertNull(state.getPreviousMasterKey()); assertTrue(state.getActiveTokens().isEmpty()); MasterKey prevKey=keygen.generateKey(); stateStore.storeContainerTokenPreviousMasterKey(prevKey); restartStateStore(); state=stateStore.loadContainerTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); assertTrue(state.getActiveTokens().isEmpty()); ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1); Long expTime1=1234567890L; ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2); Long expTime2=9876543210L; stateStore.storeContainerToken(cid1,expTime1); stateStore.storeContainerToken(cid2,expTime2); restartStateStore(); state=stateStore.loadContainerTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); Map loadedActiveTokens=state.getActiveTokens(); assertEquals(2,loadedActiveTokens.size()); assertEquals(expTime1,loadedActiveTokens.get(cid1)); assertEquals(expTime2,loadedActiveTokens.get(cid2)); ContainerId cid3=BuilderUtils.newContainerId(3,3,3,3); Long expTime3=135798642L; stateStore.storeContainerToken(cid3,expTime3); stateStore.removeContainerToken(cid1); expTime2+=246897531L; stateStore.storeContainerToken(cid2,expTime2); prevKey=currentKey; stateStore.storeContainerTokenPreviousMasterKey(prevKey); currentKey=keygen.generateKey(); 
stateStore.storeContainerTokenCurrentMasterKey(currentKey); restartStateStore(); state=stateStore.loadContainerTokensState(); assertEquals(currentKey,state.getCurrentMasterKey()); assertEquals(prevKey,state.getPreviousMasterKey()); loadedActiveTokens=state.getActiveTokens(); assertEquals(2,loadedActiveTokens.size()); assertNull(loadedActiveTokens.get(cid1)); assertEquals(expTime2,loadedActiveTokens.get(cid2)); assertEquals(expTime3,loadedActiveTokens.get(cid3)); }

    Class: org.apache.hadoop.yarn.server.nodemanager.security.TestNMContainerTokenSecretManager

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies NMContainerTokenSecretManager recovery from the state store:
     * tokens created before a simulated restart remain valid after recover();
     * a token whose container was started is no longer a valid
     * start-container request; and after two master-key rolls the old tokens
     * can no longer yield a password (retrievePassword throws InvalidToken).
     */
    @Test public void testRecovery() throws IOException { YarnConfiguration conf=new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true); final NodeId nodeId=NodeId.newInstance("somehost",1234); final ContainerId cid1=BuilderUtils.newContainerId(1,1,1,1); final ContainerId cid2=BuilderUtils.newContainerId(2,2,2,2); ContainerTokenKeyGeneratorForTest keygen=new ContainerTokenKeyGeneratorForTest(conf); NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); NMContainerTokenSecretManager secretMgr=new NMContainerTokenSecretManager(conf,stateStore); secretMgr.setNodeId(nodeId); MasterKey currentKey=keygen.generateKey(); secretMgr.setMasterKey(currentKey); ContainerTokenIdentifier tokenId1=createContainerTokenId(cid1,nodeId,"user1",secretMgr); ContainerTokenIdentifier tokenId2=createContainerTokenId(cid2,nodeId,"user2",secretMgr); assertNotNull(secretMgr.retrievePassword(tokenId1)); assertNotNull(secretMgr.retrievePassword(tokenId2)); secretMgr=new NMContainerTokenSecretManager(conf,stateStore); secretMgr.setNodeId(nodeId); secretMgr.recover(); assertEquals(currentKey,secretMgr.getCurrentKey()); assertTrue(secretMgr.isValidStartContainerRequest(tokenId1)); assertTrue(secretMgr.isValidStartContainerRequest(tokenId2)); assertNotNull(secretMgr.retrievePassword(tokenId1)); assertNotNull(secretMgr.retrievePassword(tokenId2)); secretMgr.startContainerSuccessful(tokenId2); currentKey=keygen.generateKey(); secretMgr.setMasterKey(currentKey); secretMgr=new NMContainerTokenSecretManager(conf,stateStore); secretMgr.setNodeId(nodeId); secretMgr.recover(); assertEquals(currentKey,secretMgr.getCurrentKey()); assertTrue(secretMgr.isValidStartContainerRequest(tokenId1)); assertFalse(secretMgr.isValidStartContainerRequest(tokenId2)); assertNotNull(secretMgr.retrievePassword(tokenId1)); assertNotNull(secretMgr.retrievePassword(tokenId2)); currentKey=keygen.generateKey(); 
secretMgr.setMasterKey(currentKey); secretMgr=new NMContainerTokenSecretManager(conf,stateStore); secretMgr.setNodeId(nodeId); secretMgr.recover(); assertEquals(currentKey,secretMgr.getCurrentKey()); assertTrue(secretMgr.isValidStartContainerRequest(tokenId1)); assertFalse(secretMgr.isValidStartContainerRequest(tokenId2)); try { secretMgr.retrievePassword(tokenId1); fail("token should not be valid"); } catch ( InvalidToken e) { } try { secretMgr.retrievePassword(tokenId2); fail("token should not be valid"); } catch ( InvalidToken e) { } stateStore.close(); }

    Class: org.apache.hadoop.yarn.server.nodemanager.security.TestNMTokenSecretManagerInNM

    UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies NMTokenSecretManagerInNM recovery from the state store:
     * per-attempt NM token keys and passwords survive a simulated restart;
     * after appFinished plus a master-key roll an attempt's key is dropped on
     * the following recovery, and eventually retrievePassword for finished
     * attempts fails with InvalidToken.
     */
    @Test public void testRecovery() throws IOException { YarnConfiguration conf=new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED,true); final NodeId nodeId=NodeId.newInstance("somehost",1234); final ApplicationAttemptId attempt1=ApplicationAttemptId.newInstance(ApplicationId.newInstance(1,1),1); final ApplicationAttemptId attempt2=ApplicationAttemptId.newInstance(ApplicationId.newInstance(2,2),2); NMTokenKeyGeneratorForTest keygen=new NMTokenKeyGeneratorForTest(); NMMemoryStateStoreService stateStore=new NMMemoryStateStoreService(); stateStore.init(conf); stateStore.start(); NMTokenSecretManagerInNM secretMgr=new NMTokenSecretManagerInNM(stateStore); secretMgr.setNodeId(nodeId); MasterKey currentKey=keygen.generateKey(); secretMgr.setMasterKey(currentKey); NMTokenIdentifier attemptToken1=getNMTokenId(secretMgr.createNMToken(attempt1,nodeId,"user1")); NMTokenIdentifier attemptToken2=getNMTokenId(secretMgr.createNMToken(attempt2,nodeId,"user2")); secretMgr.appAttemptStartContainer(attemptToken1); secretMgr.appAttemptStartContainer(attemptToken2); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2)); assertNotNull(secretMgr.retrievePassword(attemptToken1)); assertNotNull(secretMgr.retrievePassword(attemptToken2)); secretMgr=new NMTokenSecretManagerInNM(stateStore); secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey,secretMgr.getCurrentKey()); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2)); assertNotNull(secretMgr.retrievePassword(attemptToken1)); assertNotNull(secretMgr.retrievePassword(attemptToken2)); currentKey=keygen.generateKey(); secretMgr.setMasterKey(currentKey); secretMgr.appFinished(attempt1.getApplicationId()); secretMgr=new NMTokenSecretManagerInNM(stateStore); secretMgr.recover(); secretMgr.setNodeId(nodeId); 
assertEquals(currentKey,secretMgr.getCurrentKey()); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2)); assertNotNull(secretMgr.retrievePassword(attemptToken1)); assertNotNull(secretMgr.retrievePassword(attemptToken2)); currentKey=keygen.generateKey(); secretMgr.setMasterKey(currentKey); secretMgr=new NMTokenSecretManagerInNM(stateStore); secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey,secretMgr.getCurrentKey()); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); assertTrue(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2)); try { secretMgr.retrievePassword(attemptToken1); fail("attempt token should not still be valid"); } catch ( InvalidToken e) { } assertNotNull(secretMgr.retrievePassword(attemptToken2)); secretMgr.appFinished(attempt2.getApplicationId()); secretMgr=new NMTokenSecretManagerInNM(stateStore); secretMgr.recover(); secretMgr.setNodeId(nodeId); assertEquals(currentKey,secretMgr.getCurrentKey()); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt1)); assertFalse(secretMgr.isAppAttemptNMTokenKeyPresent(attempt2)); try { secretMgr.retrievePassword(attemptToken1); fail("attempt token should not still be valid"); } catch ( InvalidToken e) { } try { secretMgr.retrievePassword(attemptToken2); fail("attempt token should not still be valid"); } catch ( InvalidToken e) { } stateStore.close(); }

    Class: org.apache.hadoop.yarn.server.nodemanager.util.TestProcessIdFileReader

    EqualityVerifier 
    /**
     * Writes a single process id to a temp file and checks that
     * ProcessIdFileReader.getProcessId reads it back verbatim. On Windows the
     * "pid" is a container id string; elsewhere it is a plain number.
     *
     * Fixes over the previous version: the PrintWriter is closed in a finally
     * block so the file handle is not leaked if the write throws, and the
     * redundant null-then-reassign of the processId local is removed.
     */
    @Test(timeout = 30000)
    public void testSimpleGet() throws IOException {
      String rootDir = new File(System.getProperty("test.build.data", "/tmp"))
          .getAbsolutePath();
      File testFile = null;
      String expectedProcessId = Shell.WINDOWS
          ? "container_1353742680940_0002_01_000001" : "56789";
      try {
        testFile = new File(rootDir, "temp.txt");
        PrintWriter fileWriter = new PrintWriter(testFile);
        try {
          fileWriter.println(expectedProcessId);
        } finally {
          // Close even if println fails so the temp file can be deleted below.
          fileWriter.close();
        }
        String processId = ProcessIdFileReader.getProcessId(
            new Path(rootDir + Path.SEPARATOR + "temp.txt"));
        Assert.assertEquals(expectedProcessId, processId);
      } finally {
        if (testFile != null && testFile.exists()) {
          testFile.delete();
        }
      }
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * Writes a file containing blank lines, junk strings, and negative
     * numbers before the real process id, and checks that
     * ProcessIdFileReader.getProcessId skips the noise and returns the first
     * valid id, trimmed of surrounding whitespace.
     *
     * Fixes over the previous version: the PrintWriter is closed in a finally
     * block so the file handle is not leaked if a write throws, and the
     * redundant null-then-reassign of the processId local is removed.
     */
    @Test(timeout = 30000)
    public void testComplexGet() throws IOException {
      String rootDir = new File(System.getProperty("test.build.data", "/tmp"))
          .getAbsolutePath();
      File testFile = null;
      String processIdInFile = Shell.WINDOWS
          ? " container_1353742680940_0002_01_000001 " : " 23 ";
      String expectedProcessId = processIdInFile.trim();
      try {
        testFile = new File(rootDir, "temp.txt");
        PrintWriter fileWriter = new PrintWriter(testFile);
        try {
          // Noise the reader must skip before reaching the real id.
          fileWriter.println(" ");
          fileWriter.println("");
          fileWriter.println("abc");
          fileWriter.println("-123");
          fileWriter.println("-123 ");
          fileWriter.println(processIdInFile);
          fileWriter.println("6236");
        } finally {
          // Close even if a println fails so the temp file can be deleted.
          fileWriter.close();
        }
        String processId = ProcessIdFileReader.getProcessId(
            new Path(rootDir + Path.SEPARATOR + "temp.txt"));
        Assert.assertEquals(expectedProcessId, processId);
      } finally {
        if (testFile != null && testFile.exists()) {
          testFile.delete();
        }
      }
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestNMWebServices

    InternalCallVerifier EqualityVerifier 
    /**
     * Fetches /ws/v1/node/info as JSON and validates the node-info payload
     * via verifyNodeInfo.
     */
    @Test
    public void testNodeInfo() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .path("info").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject nodeInfoJson = resp.getEntity(JSONObject.class);
      verifyNodeInfo(nodeInfoJson);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A GET on the web-service root (no resource path) must fail with
     * 404 Not Found and produce no response body.
     */
    @Test
    public void testInvalidUri2() throws JSONException, Exception {
      WebResource webResource = resource();
      String responseStr = "";
      try {
        responseStr = webResource.accept(MediaType.APPLICATION_JSON)
            .get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", responseStr);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Same as testNodeInfo but with a trailing slash on the path
     * (/ws/v1/node/info/); the JSON payload must still validate.
     */
    @Test
    public void testNodeInfoSlash() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .path("info/").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject nodeInfoJson = resp.getEntity(JSONObject.class);
      verifyNodeInfo(nodeInfoJson);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Requests /ws/v1/node/info/ as XML, parses the response, and checks
     * that exactly one nodeInfo element is present before validating it.
     */
    @Test
    public void testSingleNodesXML() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .path("info/").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xml = resp.getEntity(String.class);
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource xmlSource = new InputSource();
      xmlSource.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(xmlSource);
      NodeList nodes = dom.getElementsByTagName("nodeInfo");
      assertEquals("incorrect number of elements", 1, nodes.getLength());
      verifyNodesXML(nodes);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A GET on /ws/v1/node with no Accept header must default to JSON and
     * return a valid node-info payload.
     */
    @Test
    public void testNodeDefault() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject nodeInfoJson = resp.getEntity(JSONObject.class);
      verifyNodeInfo(nodeInfoJson);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Fetches /ws/v1/node as JSON and validates the node-info payload.
     */
    @Test
    public void testNode() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject nodeInfoJson = resp.getEntity(JSONObject.class);
      verifyNodeInfo(nodeInfoJson);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A GET on a nonexistent sub-resource (/ws/v1/node/bogus) must fail with
     * 404 Not Found and produce no response body.
     */
    @Test
    public void testInvalidUri() throws JSONException, Exception {
      WebResource webResource = resource();
      String responseStr = "";
      try {
        responseStr = webResource.path("ws").path("v1").path("node")
            .path("bogus").accept(MediaType.APPLICATION_JSON)
            .get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", responseStr);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A GET on /ws/v1/node/info with no Accept header must default to JSON
     * and return a valid node-info payload.
     */
    @Test
    public void testNodeInfoDefault() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .path("info").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject nodeInfoJson = resp.getEntity(JSONObject.class);
      verifyNodeInfo(nodeInfoJson);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Same as testNode but with a trailing slash (/ws/v1/node/); the JSON
     * payload must still validate.
     */
    @Test
    public void testNodeSlash() throws JSONException, Exception {
      WebResource webResource = resource();
      ClientResponse resp = webResource.path("ws").path("v1").path("node/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject nodeInfoJson = resp.getEntity(JSONObject.class);
      verifyNodeInfo(nodeInfoJson);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the /ws/v1/node/containerlogs endpoint: writes a log file for
     * a mock RUNNING container, fetches it by container id and filename and
     * checks the exact contents; requests a nonexistent filename and expects
     * 404 with a "Cannot find this log" message; then removes the container
     * from the NM context and checks the log file is still served.
     */
    @Test public void testContainerLogs() throws IOException { WebResource r=resource(); final ContainerId containerId=BuilderUtils.newContainerId(0,0,0,0); final String containerIdStr=BuilderUtils.newContainerId(0,0,0,0).toString(); final ApplicationAttemptId appAttemptId=containerId.getApplicationAttemptId(); final ApplicationId appId=appAttemptId.getApplicationId(); final String appIdStr=appId.toString(); final String filename="logfile1"; final String logMessage="log message\n"; nmContext.getApplications().put(appId,new ApplicationImpl(null,"user",appId,null,nmContext)); MockContainer container=new MockContainer(appAttemptId,new AsyncDispatcher(),new Configuration(),"user",appId,1); container.setState(ContainerState.RUNNING); nmContext.getContainers().put(containerId,container); Path path=dirsHandler.getLogPathForWrite(ContainerLaunch.getRelativeContainerLogDir(appIdStr,containerIdStr) + "/" + filename,false); File logFile=new File(path.toUri().getPath()); logFile.deleteOnExit(); assertTrue("Failed to create log dir",logFile.getParentFile().mkdirs()); PrintWriter pw=new PrintWriter(logFile); pw.print(logMessage); pw.close(); ClientResponse response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class); String responseText=response.getEntity(String.class); assertEquals(logMessage,responseText); response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path("uhhh").accept(MediaType.TEXT_PLAIN).get(ClientResponse.class); Assert.assertEquals(Status.NOT_FOUND.getStatusCode(),response.getStatus()); responseText=response.getEntity(String.class); assertTrue(responseText.contains("Cannot find this log on the local disk.")); nmContext.getContainers().remove(containerId); Assert.assertNull(nmContext.getContainers().get(containerId)); 
response=r.path("ws").path("v1").path("node").path("containerlogs").path(containerIdStr).path(filename).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class); responseText=response.getEntity(String.class); assertEquals(logMessage,responseText); }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting /ws/v1/node with an unsupported Accept type (text/plain)
     * must fail with 500 Internal Server Error and produce no response body.
     */
    @Test
    public void testInvalidAccept() throws JSONException, Exception {
      WebResource webResource = resource();
      String responseStr = "";
      try {
        responseStr = webResource.path("ws").path("v1").path("node")
            .accept(MediaType.TEXT_PLAIN).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.INTERNAL_SERVER_ERROR,
            resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", responseStr);
      }
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestNMWebServicesApps

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requests a single application id that is not present on the node and
     * verifies the 404 RemoteException payload: its message, exception type
     * (NotFoundException), and fully-qualified class name.
     */
    @Test public void testNodeSingleAppsMissing() throws JSONException, Exception { WebResource r=resource(); Application app=new MockApp(1); nmContext.getApplications().put(app.getAppId(),app); addAppContainers(app); Application app2=new MockApp(2); nmContext.getApplications().put(app2.getAppId(),app2); addAppContainers(app2); try { r.path("ws").path("v1").path("node").path("apps").path("application_1234_0009").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid user query"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.NOT_FOUND,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: app with id application_1234_0009 not found",message); WebServicesTestUtils.checkStringMatch("exception type","NotFoundException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.NotFoundException",classname); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * Queries /ws/v1/node/apps filtered by the INITING state; with no apps in
     * that state the "apps" element must come back as JSON null.
     */
    @Test
    public void testNodeAppsStateNone() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse resp = webResource.path("ws").path("v1").path("node")
          .path("apps")
          .queryParam("state", ApplicationState.INITING.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject json = resp.getEntity(JSONObject.class);
      assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Queries /ws/v1/node/apps with an empty "user" query parameter and
     * verifies the 400 RemoteException payload: its message, exception type
     * (BadRequestException), and fully-qualified class name.
     */
    @Test public void testNodeAppsUserEmpty() throws JSONException, Exception { WebResource r=resource(); Application app=new MockApp(1); nmContext.getApplications().put(app.getAppId(),app); addAppContainers(app); Application app2=new MockApp("foo",1234,2); nmContext.getApplications().put(app2.getAppId(),app2); addAppContainers(app2); try { r.path("ws").path("v1").path("node").path("apps").queryParam("user","").accept(MediaType.APPLICATION_JSON).get(JSONObject.class); fail("should have thrown exception on invalid user query"); } catch ( UniformInterfaceException ue) { ClientResponse response=ue.getResponse(); assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus()); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject msg=response.getEntity(JSONObject.class); JSONObject exception=msg.getJSONObject("RemoteException"); assertEquals("incorrect number of elements",3,exception.length()); String message=exception.getString("message"); String type=exception.getString("exception"); String classname=exception.getString("javaClassName"); WebServicesTestUtils.checkStringMatch("exception message","java.lang.Exception: Error: You must specify a non-empty string for the user",message); WebServicesTestUtils.checkStringMatch("exception type","BadRequestException",type); WebServicesTestUtils.checkStringMatch("exception classname","org.apache.hadoop.yarn.webapp.BadRequestException",classname); } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Listing apps as XML should return one app element per application. */
    @Test
    public void testNodeAppsXML() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("apps").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList nodes = dom.getElementsByTagName("app");
      assertEquals("incorrect number of elements", 2, nodes.getLength());
    }

    InternalCallVerifier EqualityVerifier 
    /** With no applications registered, the "apps" element is JSON null. */
    @Test
    public void testNodeAppsNone() throws JSONException, Exception {
      ClientResponse response = resource().path("ws").path("v1").path("node")
          .path("apps").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("apps isn't NULL", JSONObject.NULL, json.get("apps"));
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * An unknown state filter must yield HTTP 400 even when no Accept header
     * is supplied (JSON is the default response type).
     */
    @Test
    public void testNodeAppsStateInvalidDefault() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        // Deliberately no accept(): exercises the default media type.
        webResource.path("ws").path("v1").path("node").path("apps")
            .queryParam("state", "FOO_STATE").get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject msg = response.getEntity(JSONObject.class);
        JSONObject exception = msg.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        verifyStateInvalidException(exception.getString("message"),
            exception.getString("exception"),
            exception.getString("javaClassName"));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * An unknown state filter with an explicit JSON Accept header yields
     * HTTP 400 with the standard RemoteException body.
     */
    @Test
    public void testNodeAppsStateInvalid() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        webResource.path("ws").path("v1").path("node").path("apps")
            .queryParam("state", "FOO_STATE")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject msg = response.getEntity(JSONObject.class);
        JSONObject exception = msg.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        verifyStateInvalidException(exception.getString("message"),
            exception.getString("exception"),
            exception.getString("javaClassName"));
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by a user that owns no apps returns a JSON null "apps". */
    @Test
    public void testNodeAppsUserNone() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("apps").queryParam("user", "george")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A malformed application id in the path ("app_foo_0000") surfaces the
     * underlying NumberFormatException as an HTTP 400 response.
     */
    @Test
    public void testNodeSingleAppsInvalid() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        webResource.path("ws").path("v1").path("node").path("apps")
            .path("app_foo_0000").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject msg = response.getEntity(JSONObject.class);
        JSONObject exception = msg.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "For input string: \"foo\"", exception.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "NumberFormatException", exception.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "java.lang.NumberFormatException",
            exception.getString("javaClassName"));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * An invalid state filter with an XML Accept header returns the
     * RemoteException serialized as XML, still with HTTP 400.
     */
    @Test
    public void testNodeAppsStateInvalidXML() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        webResource.path("ws").path("v1").path("node").path("apps")
            .queryParam("state", "FOO_STATE")
            .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String msg = response.getEntity(String.class);
        DocumentBuilder builder =
            DocumentBuilderFactory.newInstance().newDocumentBuilder();
        InputSource source = new InputSource();
        source.setCharacterStream(new StringReader(msg));
        Document dom = builder.parse(source);
        NodeList nodes = dom.getElementsByTagName("RemoteException");
        Element element = (Element) nodes.item(0);
        verifyStateInvalidException(
            WebServicesTestUtils.getXmlString(element, "message"),
            WebServicesTestUtils.getXmlString(element, "exception"),
            WebServicesTestUtils.getXmlString(element, "javaClassName"));
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** A trailing slash after the application id is accepted. */
    @Test
    public void testNodeSingleAppsSlash() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      HashMap hash = addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("apps").path(firstApp.getAppId().toString() + "/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyNodeAppInfo(json.getJSONObject("app"), firstApp, hash);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Filtering apps by state=RUNNING returns exactly the one application
     * that was moved into the RUNNING state.
     */
    @Test
    public void testNodeAppsState() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      MockApp runningApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(runningApp.getAppId(), runningApp);
      HashMap hash2 = addAppContainers(runningApp);
      runningApp.setState(ApplicationState.RUNNING);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("apps")
          .queryParam("state", ApplicationState.RUNNING.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      JSONObject info = json.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, info.length());
      JSONArray appInfo = info.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appInfo.length());
      verifyNodeAppInfo(appInfo.getJSONObject(0), runningApp, hash2);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Filtering apps by user=mockUser returns only the application owned
     * by that user.
     */
    @Test
    public void testNodeAppsUser() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      HashMap hash = addAppContainers(firstApp);
      Application secondApp = new MockApp("foo", 1234, 2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("apps").queryParam("user", "mockUser")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      JSONObject info = json.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, info.length());
      JSONArray appInfo = info.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appInfo.length());
      verifyNodeAppInfo(appInfo.getJSONObject(0), firstApp, hash);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Fetching a single app as XML yields exactly one app element. */
    @Test
    public void testNodeSingleAppsXML() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      HashMap hash = addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("apps").path(firstApp.getAppId().toString() + "/")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList nodes = dom.getElementsByTagName("app");
      assertEquals("incorrect number of elements", 1, nodes.getLength());
      verifyNodeAppInfoXML(nodes, firstApp, hash);
    }

    Class: org.apache.hadoop.yarn.server.nodemanager.webapp.TestNMWebServicesContainers

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A structurally invalid container id ("container_foo_1234") is rejected
     * with HTTP 400 and a BadRequestException payload.
     */
    @Test
    public void testSingleContainerInvalid() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        webResource.path("ws").path("v1").path("node").path("containers")
            .path("container_foo_1234").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject msg = response.getEntity(JSONObject.class);
        JSONObject exception = msg.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: invalid container id, container_foo_1234",
            exception.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "BadRequestException", exception.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.BadRequestException",
            exception.getString("javaClassName"));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A well-formed container id that does not exist on this node yields
     * HTTP 404 with a NotFoundException payload.
     */
    @Test
    public void testSingleContainerWrong() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        webResource.path("ws").path("v1").path("node").path("containers")
            .path("container_1234_0001_01_000005")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject msg = response.getEntity(JSONObject.class);
        JSONObject exception = msg.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: container with id, container_1234_0001_01_000005, not found",
            exception.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "NotFoundException", exception.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.NotFoundException",
            exception.getString("javaClassName"));
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A truncated container id ("container_1234_0001") is rejected with
     * HTTP 400 and a BadRequestException payload.
     */
    @Test
    public void testSingleContainerInvalid2() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      try {
        webResource.path("ws").path("v1").path("node").path("containers")
            .path("container_1234_0001").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid user query");
      } catch (UniformInterfaceException ue) {
        ClientResponse response = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject msg = response.getEntity(JSONObject.class);
        JSONObject exception = msg.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: invalid container id, container_1234_0001",
            exception.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "BadRequestException", exception.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.BadRequestException",
            exception.getString("javaClassName"));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Each container can be fetched individually as XML and matches the
     * container state held by the NM context.
     */
    @Test
    public void testNodeSingleContainerXML() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      HashMap hash = addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      // hash is a raw HashMap here, so keySet() yields Objects; a typed
      // for-each (for (String id : hash.keySet())) does not compile.
      // Iterate as Object and cast explicitly.
      for (Object key : hash.keySet()) {
        String id = (String) key;
        ClientResponse response = webResource.path("ws").path("v1")
            .path("node").path("containers").path(id)
            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
        String xml = response.getEntity(String.class);
        DocumentBuilder builder =
            DocumentBuilderFactory.newInstance().newDocumentBuilder();
        InputSource source = new InputSource();
        source.setCharacterStream(new StringReader(xml));
        Document dom = builder.parse(source);
        NodeList nodes = dom.getElementsByTagName("container");
        assertEquals("incorrect number of elements", 1, nodes.getLength());
        verifyContainersInfoXML(nodes,
            nmContext.getContainers().get(ConverterUtils.toContainerId(id)));
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Listing containers as XML returns one container element per running
     * container (two apps x two containers each).
     */
    @Test
    public void testNodeContainerXML() throws JSONException, Exception {
      WebResource webResource = resource();
      Application firstApp = new MockApp(1);
      nmContext.getApplications().put(firstApp.getAppId(), firstApp);
      addAppContainers(firstApp);
      Application secondApp = new MockApp(2);
      nmContext.getApplications().put(secondApp.getAppId(), secondApp);
      addAppContainers(secondApp);
      ClientResponse response = webResource.path("ws").path("v1").path("node")
          .path("containers").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList nodes = dom.getElementsByTagName("container");
      assertEquals("incorrect number of elements", 4, nodes.getLength());
    }

    InternalCallVerifier EqualityVerifier 
    /** With no containers, the "containers" element is JSON null. */
    @Test
    public void testNodeContainersNone() throws JSONException, Exception {
      ClientResponse response = resource().path("ws").path("v1").path("node")
          .path("containers").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("apps isn't NULL", JSONObject.NULL, json.get("containers"));
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization

    APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * An AM that authenticates with a valid AMRMToken can register with the
     * RM and receives the expected view ACL and client-to-AM master key.
     */
    @Test
    public void testAuthorizedAccess() throws Exception {
      MyContainerManager containerManager = new MyContainerManager();
      rm = new MockRMWithAMS(conf, containerManager);
      rm.start();
      MockNM nm1 = rm.registerNode("localhost:1234", 5120);
      Map<ApplicationAccessType, String> acls =
          new HashMap<ApplicationAccessType, String>(2);
      acls.put(ApplicationAccessType.VIEW_APP, "*");
      RMApp app = rm.submitApp(1024, "appname", "appuser", acls);
      nm1.nodeHeartbeat(true);
      // Poll (up to ~20s) for the AM container launch to reach the NM.
      int waitCount = 0;
      while (containerManager.containerTokens == null && waitCount++ < 20) {
        LOG.info("Waiting for AM Launch to happen..");
        Thread.sleep(1000);
      }
      Assert.assertNotNull(containerManager.containerTokens);
      RMAppAttempt attempt = app.getCurrentAppAttempt();
      ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
      waitForLaunchedState(attempt);
      // Build an AM-side client authenticated with the AMRMToken. This local
      // conf intentionally shadows the field: it is the RM's own config.
      final Configuration conf = rm.getConfig();
      final YarnRPC rpc = YarnRPC.create(conf);
      UserGroupInformation currentUser =
          UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
      Credentials credentials = containerManager.getContainerCredentials();
      final InetSocketAddress rmBindAddress =
          rm.getApplicationMasterService().getBindAddress();
      Token amRMToken = MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
          credentials.getAllTokens());
      currentUser.addToken(amRMToken);
      // Type parameter restored: with a raw PrivilegedAction, doAs() returns
      // Object and the assignment below would not compile.
      ApplicationMasterProtocol client =
          currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
            @Override
            public ApplicationMasterProtocol run() {
              return (ApplicationMasterProtocol) rpc.getProxy(
                  ApplicationMasterProtocol.class,
                  rm.getApplicationMasterService().getBindAddress(), conf);
            }
          });
      RegisterApplicationMasterRequest request =
          Records.newRecord(RegisterApplicationMasterRequest.class);
      RegisterApplicationMasterResponse response =
          client.registerApplicationMaster(request);
      Assert.assertNotNull(response.getClientToAMTokenMasterKey());
      if (UserGroupInformation.isSecurityEnabled()) {
        Assert.assertTrue(
            response.getClientToAMTokenMasterKey().array().length > 0);
      }
      Assert.assertEquals("Register response has bad ACLs", "*",
          response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestAppManager

    InternalCallVerifier EqualityVerifier 
    /** Finishing a null application must not add to the completed list. */
    @Test
    public void testRMAppRetireNullApp() throws Exception {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 20000);
      TestRMAppManager appMonitor =
          new TestRMAppManager(rmContext, new Configuration());
      Assert.assertEquals("Number of apps incorrect before", 10,
          rmContext.getRMApps().size());
      appMonitor.finishApplication(null);
      Assert.assertEquals("Number of completed apps incorrect after check", 0,
          appMonitor.getCompletedAppsListSize());
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Submitting an application whose id already exists in the RM context
     * must be rejected, and the pre-existing application left untouched.
     */
    @Test(timeout = 30000)
    public void testRMAppSubmitDuplicateApplicationId() throws Exception {
      ApplicationId appId = MockApps.newAppID(0);
      asContext.setApplicationId(appId);
      RMApp appOrig = rmContext.getRMApps().get(appId);
      // Use equals() instead of reference inequality (!=): two distinct
      // String objects with the same characters would slip past a != check.
      Assert.assertTrue("app name matches but shouldn't",
          !"testApp1".equals(appOrig.getName()));
      try {
        appMonitor.submitApplication(asContext, "test");
        Assert.fail("Exception is expected when applicationId is duplicate.");
      } catch (YarnException e) {
        Assert.assertTrue("The thrown exception is not the expectd one.",
            e.getMessage().contains("Cannot add a duplicate!"));
      }
      // The original application must still be registered, unchanged.
      RMApp app = rmContext.getRMApps().get(appId);
      Assert.assertNotNull("app is null", app);
      Assert.assertEquals("app id doesn't match", appId,
          app.getApplicationId());
      Assert.assertEquals("app state doesn't match", RMAppState.FINISHED,
          app.getState());
    }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises every combination of the global RM_AM_MAX_ATTEMPTS setting
     * (10 and 1) with per-application maxAppAttempts values, including 0
     * (meaning "unset" — the global value applies). expectedNums encodes the
     * effective rule visible in the data: individual values above the global
     * limit are clamped to it, and 0 falls back to the global value.
     * NOTE(review): -1 is submitted without calling setMaxAppAttempts only
     * when the value is exactly 0, so -1 IS passed through and expected to
     * clamp to 1 — confirm against RMAppManager's validation.
     * After each submission the test waits (up to ~20s) for the app event
     * type to change away from the KILL sentinel, then resets the sentinel.
     */
    @Test(timeout=30000) public void testRMAppSubmitMaxAppAttempts() throws Exception { int[] globalMaxAppAttempts=new int[]{10,1}; int[][] individualMaxAppAttempts=new int[][]{new int[]{9,10,11,0},new int[]{1,10,0,-1}}; int[][] expectedNums=new int[][]{new int[]{9,10,10,10},new int[]{1,1,1,1}}; for (int i=0; i < globalMaxAppAttempts.length; ++i) { for (int j=0; j < individualMaxAppAttempts.length; ++j) { ResourceScheduler scheduler=mockResourceScheduler(); Configuration conf=new Configuration(); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,globalMaxAppAttempts[i]); ApplicationMasterService masterService=new ApplicationMasterService(rmContext,scheduler); TestRMAppManager appMonitor=new TestRMAppManager(rmContext,new ClientToAMTokenSecretManagerInRM(),scheduler,masterService,new ApplicationACLsManager(conf),conf); ApplicationId appID=MockApps.newAppID(i * 4 + j + 1); asContext.setApplicationId(appID); if (individualMaxAppAttempts[i][j] != 0) { asContext.setMaxAppAttempts(individualMaxAppAttempts[i][j]); } appMonitor.submitApplication(asContext,"test"); RMApp app=rmContext.getRMApps().get(appID); Assert.assertEquals("max application attempts doesn't match",expectedNums[i][j],app.getMaxAppAttempts()); int timeoutSecs=0; while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) { Thread.sleep(1000); } setAppEventType(RMAppEventType.KILL); } } }

    InternalCallVerifier EqualityVerifier 
    /**
     * With the completed-app limits set to 3, retiring 10 completed apps
     * keeps 3 in memory and removes 7 from the state store.
     */
    @Test
    public void testRMAppRetireSome() throws Exception {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 20000);
      Configuration conf = new YarnConfiguration();
      conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 3);
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 3);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
      Assert.assertEquals("Number of apps incorrect before", 10,
          rmContext.getRMApps().size());
      addToCompletedApps(appMonitor, rmContext);
      appMonitor.checkAppNumCompletedLimit();
      Assert.assertEquals("Number of apps incorrect after # completed check",
          3, rmContext.getRMApps().size());
      Assert.assertEquals("Number of completed apps incorrect after check", 3,
          appMonitor.getCompletedAppsListSize());
      verify(rmContext.getStateStore(), times(7))
          .removeApplication(isA(RMApp.class));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Retirement must only evict completed applications: with the limit set
     * to 2, live apps (RUNNING/NEW/ACCEPTED/SUBMITTED) stay in the RMContext
     * and only completed ones beyond the limit are removed.
     */
    @Test
    public void testRMAppRetireSomeDifferentStates() throws Exception {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 20000);
      Configuration conf = new YarnConfiguration();
      conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 2);
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 2);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
      rmContext.getRMApps().clear();
      Assert.assertEquals("map isn't empty", 0, rmContext.getRMApps().size());
      // Ten apps (ids 0..9) in a mix of terminal and live states.
      long[] submitTimes = {
          now - 20000, now - 200000, now - 30000, now - 20000, now - 20000,
          now - 10001, now - 30000, now - 20000, now - 10001, now - 20000 };
      RMAppState[] states = {
          RMAppState.KILLED, RMAppState.FAILED, RMAppState.FINISHED,
          RMAppState.RUNNING, RMAppState.NEW, RMAppState.KILLED,
          RMAppState.ACCEPTED, RMAppState.SUBMITTED, RMAppState.FAILED,
          RMAppState.FAILED };
      for (int id = 0; id < states.length; id++) {
        RMApp app = new MockRMApp(id, submitTimes[id], states[id]);
        rmContext.getRMApps().put(app.getApplicationId(), app);
      }
      Assert.assertEquals("Number of apps incorrect before", 10,
          rmContext.getRMApps().size());
      addToCompletedApps(appMonitor, rmContext);
      appMonitor.checkAppNumCompletedLimit();
      Assert.assertEquals("Number of apps incorrect after # completed check",
          6, rmContext.getRMApps().size());
      Assert.assertEquals("Number of completed apps incorrect after check", 2,
          appMonitor.getCompletedAppsListSize());
      verify(rmContext.getStateStore(), times(4))
          .removeApplication(isA(RMApp.class));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A completed-app limit of zero retires every completed application
     * from both memory and the state store.
     */
    @Test
    public void testRMAppRetireZeroSetting() throws Exception {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 20000);
      Configuration conf = new YarnConfiguration();
      conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS, 0);
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 0);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
      Assert.assertEquals("Number of apps incorrect before", 10,
          rmContext.getRMApps().size());
      addToCompletedApps(appMonitor, rmContext);
      Assert.assertEquals("Number of completed apps incorrect", 10,
          appMonitor.getCompletedAppsListSize());
      appMonitor.checkAppNumCompletedLimit();
      Assert.assertEquals("Number of apps incorrect after # completed check",
          0, rmContext.getRMApps().size());
      Assert.assertEquals("Number of completed apps incorrect after check", 0,
          appMonitor.getCompletedAppsListSize());
      verify(rmContext.getStateStore(), times(10))
          .removeApplication(isA(RMApp.class));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * When the completed-app limit equals the number of completed apps,
     * nothing is retired and the state store is never touched.
     */
    @Test
    public void testRMAppRetireNone() throws Exception {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 10);
      Configuration conf = new YarnConfiguration();
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 10);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
      Assert.assertEquals("Number of apps incorrect before checkAppTimeLimit",
          10, rmContext.getRMApps().size());
      addToCompletedApps(appMonitor, rmContext);
      appMonitor.checkAppNumCompletedLimit();
      Assert.assertEquals("Number of apps incorrect after # completed check",
          10, rmContext.getRMApps().size());
      Assert.assertEquals("Number of completed apps incorrect after check",
          10, appMonitor.getCompletedAppsListSize());
      verify(rmContext.getStateStore(), never())
          .removeApplication(isA(RMApp.class));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * When the state-store limit (1000) exceeds the in-memory limit (8),
     * eviction is driven by the memory limit and both counts end up equal.
     */
    @Test
    public void testStateStoreAppLimitLargerThanMemoryAppLimit() {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 20000);
      Configuration conf = new YarnConfiguration();
      int maxAppsInMemory = 8;
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
          maxAppsInMemory);
      conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
          1000);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
      addToCompletedApps(appMonitor, rmContext);
      Assert.assertEquals("Number of completed apps incorrect", 10,
          appMonitor.getCompletedAppsListSize());
      appMonitor.checkAppNumCompletedLimit();
      int numRemoveApps = 10 - maxAppsInMemory;
      Assert.assertEquals("Number of apps incorrect after # completed check",
          maxAppsInMemory, rmContext.getRMApps().size());
      Assert.assertEquals("Number of completed apps incorrect after check",
          maxAppsInMemory, appMonitor.getCompletedAppsListSize());
      verify(rmContext.getStateStore(), times(numRemoveApps))
          .removeApplication(isA(RMApp.class));
      Assert.assertEquals(maxAppsInMemory,
          appMonitor.getCompletedAppsInStateStore());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * When the state-store limit (4) is below the in-memory limit (8), more
     * apps are purged from the store than from memory.
     */
    @Test
    public void testStateStoreAppLimitLessThanMemoryAppLimit() {
      long now = System.currentTimeMillis();
      RMContext rmContext = mockRMContext(10, now - 20000);
      Configuration conf = new YarnConfiguration();
      int maxAppsInMemory = 8;
      int maxAppsInStateStore = 4;
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,
          maxAppsInMemory);
      conf.setInt(YarnConfiguration.RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS,
          maxAppsInStateStore);
      TestRMAppManager appMonitor = new TestRMAppManager(rmContext, conf);
      addToCompletedApps(appMonitor, rmContext);
      Assert.assertEquals("Number of completed apps incorrect", 10,
          appMonitor.getCompletedAppsListSize());
      appMonitor.checkAppNumCompletedLimit();
      Assert.assertEquals("Number of apps incorrect after # completed check",
          maxAppsInMemory, rmContext.getRMApps().size());
      Assert.assertEquals("Number of completed apps incorrect after check",
          maxAppsInMemory, appMonitor.getCompletedAppsListSize());
      int numRemoveAppsFromStateStore = 10 - maxAppsInStateStore;
      verify(rmContext.getStateStore(), times(numRemoveAppsFromStateStore))
          .removeApplication(isA(RMApp.class));
      Assert.assertEquals(maxAppsInStateStore,
          appMonitor.getCompletedAppsInStateStore());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A plain submission registers the app in NEW state and eventually
     * results in a START event being sent.
     */
    @Test
    public void testRMAppSubmit() throws Exception {
      appMonitor.submitApplication(asContext, "test");
      RMApp app = rmContext.getRMApps().get(appId);
      Assert.assertNotNull("app is null", app);
      Assert.assertEquals("app id doesn't match", appId,
          app.getApplicationId());
      Assert.assertEquals("app state doesn't match", RMAppState.NEW,
          app.getState());
      // Poll (up to ~20s) until the KILL sentinel event type is replaced.
      int timeoutSecs = 0;
      while ((getAppEventType() == RMAppEventType.KILL)
          && timeoutSecs++ < 20) {
        Thread.sleep(1000);
      }
      Assert.assertEquals("app event type sent is wrong",
          RMAppEventType.START, getAppEventType());
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestApplicationCleanup

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * End-to-end container cleanup: allocates two containers, releases one,
     * then heartbeats the NM still reporting the released container as
     * RUNNING and waits for the RM to order its cleanup. The stale-status
     * heartbeat is repeated to cover a launch report arriving long after the
     * release. A DrainDispatcher is installed (via the anonymous MockRM
     * overrides) so dispatcher.await() can flush scheduler events
     * deterministically between steps instead of sleeping blindly.
     */
    @SuppressWarnings("resource") @Test public void testContainerCleanup() throws Exception { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); final DrainDispatcher dispatcher=new DrainDispatcher(); MockRM rm=new MockRM(){ @Override protected EventHandler createSchedulerEventDispatcher(){ return new SchedulerEventDispatcher(this.scheduler){ @Override public void handle( SchedulerEvent event){ scheduler.handle(event); } } ; } @Override protected Dispatcher createDispatcher(){ return dispatcher; } } ; rm.start(); MockNM nm1=rm.registerNode("127.0.0.1:1234",5000); RMApp app=rm.submitApp(2000); nm1.nodeHeartbeat(true); RMAppAttempt attempt=app.getCurrentAppAttempt(); MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId()); am.registerAppAttempt(); int request=2; am.allocate("127.0.0.1",1000,request,new ArrayList()); dispatcher.await(); nm1.nodeHeartbeat(true); List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); int contReceived=conts.size(); int waitCount=0; while (contReceived < request && waitCount++ < 200) { LOG.info("Got " + contReceived + " containers. 
Waiting to get "+ request); Thread.sleep(100); conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); dispatcher.await(); contReceived+=conts.size(); nm1.nodeHeartbeat(true); } Assert.assertEquals(request,contReceived); ArrayList release=new ArrayList(); release.add(conts.get(0).getId()); am.allocate(new ArrayList(),release); dispatcher.await(); Map> containerStatuses=new HashMap>(); ArrayList containerStatusList=new ArrayList(); containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0).getId(),ContainerState.RUNNING,"nothing",0)); containerStatuses.put(app.getApplicationId(),containerStatusList); NodeHeartbeatResponse resp=nm1.nodeHeartbeat(containerStatuses,true); waitForContainerCleanup(dispatcher,nm1,resp); LOG.info("Testing container launch much after release and " + "NM getting cleanup"); containerStatuses.clear(); containerStatusList.clear(); containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0).getId(),ContainerState.RUNNING,"nothing",0)); containerStatuses.put(app.getApplicationId(),containerStatusList); resp=nm1.nodeHeartbeat(containerStatuses,true); waitForContainerCleanup(dispatcher,nm1,resp); rm.stop(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Application cleanup on finish: after the AM unregisters and the attempt
     * reaches FINISHED, subsequent NM heartbeats must eventually (polled up
     * to ~20s) carry cleanup orders for exactly 1 application (this app's id)
     * and its 2 containers. Cleanup ids are accumulated across heartbeats
     * because the RM may spread them over several responses.
     */
    @SuppressWarnings("resource") @Test public void testAppCleanup() throws Exception { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MockRM rm=new MockRM(); rm.start(); MockNM nm1=rm.registerNode("127.0.0.1:1234",5000); RMApp app=rm.submitApp(2000); nm1.nodeHeartbeat(true); RMAppAttempt attempt=app.getCurrentAppAttempt(); MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId()); am.registerAppAttempt(); int request=2; am.allocate("127.0.0.1",1000,request,new ArrayList()); nm1.nodeHeartbeat(true); List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); int contReceived=conts.size(); int waitCount=0; while (contReceived < request && waitCount++ < 200) { LOG.info("Got " + contReceived + " containers. Waiting to get "+ request); Thread.sleep(100); conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); contReceived+=conts.size(); nm1.nodeHeartbeat(true); } Assert.assertEquals(request,contReceived); am.unregisterAppAttempt(); NodeHeartbeatResponse resp=nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE); am.waitForState(RMAppAttemptState.FINISHED); resp=nm1.nodeHeartbeat(true); List containersToCleanup=resp.getContainersToCleanup(); List appsToCleanup=resp.getApplicationsToCleanup(); int numCleanedContainers=containersToCleanup.size(); int numCleanedApps=appsToCleanup.size(); waitCount=0; while ((numCleanedContainers < 2 || numCleanedApps < 1) && waitCount++ < 200) { LOG.info("Waiting to get cleanup events.. 
cleanedConts: " + numCleanedContainers + " cleanedApps: "+ numCleanedApps); Thread.sleep(100); resp=nm1.nodeHeartbeat(true); List deltaContainersToCleanup=resp.getContainersToCleanup(); List deltaAppsToCleanup=resp.getApplicationsToCleanup(); containersToCleanup.addAll(deltaContainersToCleanup); appsToCleanup.addAll(deltaAppsToCleanup); numCleanedContainers=containersToCleanup.size(); numCleanedApps=appsToCleanup.size(); } Assert.assertEquals(1,appsToCleanup.size()); Assert.assertEquals(app.getApplicationId(),appsToCleanup.get(0)); Assert.assertEquals(1,numCleanedApps); Assert.assertEquals(2,numCleanedContainers); rm.stop(); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestApplicationMasterLauncher

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * AM launch/cleanup handshake against a custom ContainerManager stub:
     * after app submission, polls (up to ~20s) for the AM launch call, then
     * verifies the attempt id, submit time, master-container id, NM address
     * and max-app-attempts that actually reached the container manager.
     * Finally unregisters the AM, completes its container via heartbeat, and
     * polls for the container manager's cleanup call.
     */
    @Test public void testAMLaunchAndCleanup() throws Exception { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MyContainerManagerImpl containerManager=new MyContainerManagerImpl(); MockRMWithCustomAMLauncher rm=new MockRMWithCustomAMLauncher(containerManager); rm.start(); MockNM nm1=rm.registerNode("127.0.0.1:1234",5120); RMApp app=rm.submitApp(2000); nm1.nodeHeartbeat(true); int waitCount=0; while (containerManager.launched == false && waitCount++ < 20) { LOG.info("Waiting for AM Launch to happen.."); Thread.sleep(1000); } Assert.assertTrue(containerManager.launched); RMAppAttempt attempt=app.getCurrentAppAttempt(); ApplicationAttemptId appAttemptId=attempt.getAppAttemptId(); Assert.assertEquals(appAttemptId.toString(),containerManager.attemptIdAtContainerManager); Assert.assertEquals(app.getSubmitTime(),containerManager.submitTimeAtContainerManager); Assert.assertEquals(app.getRMAppAttempt(appAttemptId).getMasterContainer().getId().toString(),containerManager.containerIdAtContainerManager); Assert.assertEquals(nm1.getNodeId().toString(),containerManager.nmHostAtContainerManager); Assert.assertEquals(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS,containerManager.maxAppAttempts); MockAM am=new MockAM(rm.getRMContext(),rm.getApplicationMasterService(),appAttemptId); am.registerAppAttempt(); am.unregisterAppAttempt(); nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE); am.waitForState(RMAppAttemptState.FINISHED); waitCount=0; while (containerManager.cleanedup == false && waitCount++ < 20) { LOG.info("Waiting for AM Cleanup to happen.."); Thread.sleep(1000); } Assert.assertTrue(containerManager.cleanedup); am.waitForState(RMAppAttemptState.FINISHED); rm.stop(); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the allocate-before-register protocol of the AM service:
     * allocate() before registerAppAttempt() must answer AM_RESYNC, a second
     * registration must be rejected with a descriptive message, and
     * allocate() after the attempt FINISHED must answer AM_SHUTDOWN.
     */
    @Test(timeout=100000) public void testallocateBeforeAMRegistration() throws Exception {
      Logger rootLogger=LogManager.getRootLogger();
      boolean thrown=false;
      rootLogger.setLevel(Level.DEBUG);
      MockRM rm=new MockRM();
      rm.start();
      MockNM nm1=rm.registerNode("h1:1234",5000);
      RMApp app=rm.submitApp(2000);
      nm1.nodeHeartbeat(true);
      RMAppAttempt attempt=app.getCurrentAppAttempt();
      MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId());
      int request=2;
      // allocate() before registerAppAttempt(): the RM must ask for a resync.
      AllocateResponse ar=am.allocate("h1",1000,request,new ArrayList());
      Assert.assertTrue(ar.getAMCommand() == AMCommand.AM_RESYNC);
      nm1.nodeHeartbeat(true);
      AllocateResponse amrs=am.allocate(new ArrayList(),new ArrayList());
      // BUG FIX: the original re-asserted on 'ar' here, so the second
      // allocate's response ('amrs') was never actually verified.
      Assert.assertTrue(amrs.getAMCommand() == AMCommand.AM_RESYNC);
      am.registerAppAttempt();
      thrown=false;
      try {
        // A duplicate registration must be rejected.
        am.registerAppAttempt(false);
      } catch ( Exception e) {
        Assert.assertEquals("Application Master is already registered : " + attempt.getAppAttemptId().getApplicationId(),e.getMessage());
        thrown=true;
      }
      Assert.assertTrue(thrown);
      am.unregisterAppAttempt();
      nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE);
      am.waitForState(RMAppAttemptState.FINISHED);
      // allocate() after the attempt finished: the RM must order a shutdown.
      AllocateResponse amrs2=am.allocate(new ArrayList(),new ArrayList());
      Assert.assertTrue(amrs2.getAMCommand() == AMCommand.AM_SHUTDOWN);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestApplicationMasterService

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the RM identifier baked into an allocated container's
     * token equals the cluster timestamp of the RM that issued it.
     */
    @Test(timeout=3000000) public void testRMIdentifierOnContainerAllocation() throws Exception {
      MockRM resourceManager=new MockRM(conf);
      resourceManager.start();
      MockNM node=resourceManager.registerNode("127.0.0.1:1234",6 * GB);
      RMApp application=resourceManager.submitApp(2048);
      node.nodeHeartbeat(true);
      RMAppAttempt appAttempt=application.getCurrentAppAttempt();
      MockAM master=resourceManager.sendAMLaunched(appAttempt.getAppAttemptId());
      master.registerAppAttempt();
      master.addRequests(new String[]{"127.0.0.1"},GB,1,1);
      AllocateResponse response=master.schedule();
      node.nodeHeartbeat(true);
      // Poll the scheduler until at least one container comes back.
      while (response.getAllocatedContainers().size() < 1) {
        LOG.info("Waiting for containers to be created for app 1...");
        sleep(1000);
        response=master.schedule();
      }
      Container firstContainer=response.getAllocatedContainers().get(0);
      ContainerTokenIdentifier identifier=BuilderUtils.newContainerTokenIdentifier(firstContainer.getContainerToken());
      Assert.assertEquals(MockRM.getClusterTimeStamp(),identifier.getRMIdentifer());
      resourceManager.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestClientRMService

    APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Killing an application the RM has never seen must fail with an
     * ApplicationNotFoundException carrying the exact message asserted below.
     */
    @Test public void testForceKillNonExistingApplication() throws YarnException {
      RMContext rmContext=mock(RMContext.class);
      when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap());
      ClientRMService rmService=new ClientRMService(rmContext,null,null,null,null,null);
      ApplicationId applicationId=BuilderUtils.newApplicationId(System.currentTimeMillis(),0);
      KillApplicationRequest request=KillApplicationRequest.newInstance(applicationId);
      try {
        rmService.forceKillApplication(request);
        Assert.fail();
      } catch ( ApplicationNotFoundException ex) {
        // BUG FIX: assertEquals takes (expected, actual); the original call
        // had them swapped, which garbles the diagnostic on failure.
        Assert.assertEquals("Trying to kill an absent " + "application " + request.getApplicationId(),ex.getMessage());
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * getQueueInfo(): a known queue ("testqueue") reports its two
     * applications; querying an unknown queue ("nonexistentqueue") must
     * simply complete without throwing (no further assertion is made).
     */
    @Test public void testGetQueueInfo() throws Exception {
      YarnScheduler scheduler=mock(YarnScheduler.class);
      RMContext context=mock(RMContext.class);
      mockRMContext(scheduler,context);
      ClientRMService service=new ClientRMService(context,scheduler,null,null,null,null);
      GetQueueInfoRequest queueRequest=recordFactory.newRecordInstance(GetQueueInfoRequest.class);
      queueRequest.setQueueName("testqueue");
      queueRequest.setIncludeApplications(true);
      GetQueueInfoResponse queueResponse=service.getQueueInfo(queueRequest);
      List reports=queueResponse.getQueueInfo().getApplications();
      Assert.assertEquals(2,reports.size());
      // Unknown queue: only checks that the call does not blow up.
      queueRequest.setQueueName("nonexistentqueue");
      queueRequest.setIncludeApplications(true);
      queueResponse=service.getQueueInfo(queueRequest);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a report for an application id unknown to the RM must fail
     * with an ApplicationNotFoundException carrying the message below.
     */
    @Test public void testGetApplicationReport() throws YarnException {
      RMContext rmContext=mock(RMContext.class);
      when(rmContext.getRMApps()).thenReturn(new ConcurrentHashMap());
      ClientRMService rmService=new ClientRMService(rmContext,null,null,null,null,null);
      RecordFactory recordFactory=RecordFactoryProvider.getRecordFactory(null);
      GetApplicationReportRequest request=recordFactory.newRecordInstance(GetApplicationReportRequest.class);
      request.setApplicationId(ApplicationId.newInstance(0,0));
      try {
        rmService.getApplicationReport(request);
        Assert.fail();
      } catch ( ApplicationNotFoundException ex) {
        // BUG FIX: assertEquals takes (expected, actual); the original call
        // had them swapped, which garbles the diagnostic on failure.
        Assert.assertEquals("Application with id '" + request.getApplicationId() + "' doesn't exist in RM.",ex.getMessage());
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A freshly constructed RMAppAttemptImpl (no containers allocated yet)
     * must report the shared DUMMY_APPLICATION_RESOURCE_USAGE_REPORT rather
     * than null or an ad-hoc value.
     */
    @Test public void testGetApplicationResourceUsageReportDummy() throws YarnException, IOException {
      ApplicationAttemptId applicationAttemptId=getApplicationAttemptId(1);
      YarnScheduler scheduler=mockYarnScheduler();
      RMContext context=mock(RMContext.class);
      mockRMContext(scheduler,context);
      // Swallow all dispatched events; this test never needs to drain them.
      when(context.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){
        public void handle( Event event){
        }
      });
      ApplicationSubmissionContext submissionContext=mock(ApplicationSubmissionContext.class);
      YarnConfiguration configuration=new YarnConfiguration();
      RMAppAttemptImpl freshAttempt=new RMAppAttemptImpl(applicationAttemptId,context,scheduler,null,submissionContext,configuration,false);
      ApplicationResourceUsageReport usageReport=freshAttempt.getApplicationResourceUsageReport();
      assertEquals(usageReport,RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT);
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * getContainerReport() for a container known to the mocked RM service
     * must echo back the requested ContainerId; an
     * ApplicationNotFoundException is treated as a test failure.
     */
    @Test public void testGetContainerReport() throws YarnException, IOException {
      ClientRMService service=createRMService();
      RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
      GetContainerReportRequest reportRequest=factory.newRecordInstance(GetContainerReportRequest.class);
      ApplicationAttemptId appAttemptId=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1);
      ContainerId requestedContainer=ContainerId.newInstance(appAttemptId,1);
      reportRequest.setContainerId(requestedContainer);
      try {
        GetContainerReportResponse reply=service.getContainerReport(reportRequest);
        Assert.assertEquals(requestedContainer,reply.getContainerReport().getContainerId());
      } catch ( ApplicationNotFoundException ex) {
        Assert.fail(ex.getMessage());
      }
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Force-kill semantics for managed vs unmanaged AMs: killing a
     * managed-AM app requires repeated forceKillApplication() calls until
     * getIsKillCompleted() turns true (asserted to take more than one
     * attempt), while killing an unmanaged-AM app acknowledges completion on
     * the first call; afterwards both apps must appear in the KILLED-state
     * application listing (polled up to ~1s).
     */
    @Test public void testForceKillApplication() throws Exception { YarnConfiguration conf=new YarnConfiguration(); MockRM rm=new MockRM(); rm.init(conf); rm.start(); ClientRMService rmService=rm.getClientRMService(); GetApplicationsRequest getRequest=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.KILLED)); RMApp app1=rm.submitApp(1024); RMApp app2=rm.submitApp(1024,true); assertEquals("Incorrect number of apps in the RM",0,rmService.getApplications(getRequest).getApplicationList().size()); KillApplicationRequest killRequest1=KillApplicationRequest.newInstance(app1.getApplicationId()); KillApplicationRequest killRequest2=KillApplicationRequest.newInstance(app2.getApplicationId()); int killAttemptCount=0; for (int i=0; i < 100; i++) { KillApplicationResponse killResponse1=rmService.forceKillApplication(killRequest1); killAttemptCount++; if (killResponse1.getIsKillCompleted()) { break; } Thread.sleep(10); } assertTrue("Kill attempt count should be greater than 1 for managed AMs",killAttemptCount > 1); assertEquals("Incorrect number of apps in the RM",1,rmService.getApplications(getRequest).getApplicationList().size()); KillApplicationResponse killResponse2=rmService.forceKillApplication(killRequest2); assertTrue("Killing UnmanagedAM should falsely acknowledge true",killResponse2.getIsKillCompleted()); for (int i=0; i < 100; i++) { if (2 == rmService.getApplications(getRequest).getApplicationList().size()) { break; } Thread.sleep(10); } assertEquals("Incorrect number of apps in the RM",2,rmService.getApplications(getRequest).getApplicationList().size()); }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Drives ClientRMService.submitApplication() through a real RMAppManager
     * with mocked ACL/queue-ACL managers: a request without name/queue falls
     * back to the configured defaults, a second app keeps its explicit
     * name/queue and "matchType" application type, re-submitting the same app
     * id is a silent no-op (no exception), and getApplications() filters by
     * application type. NOTE(review): the expected total of 5 apps presumably
     * includes apps pre-seeded by mockRMContext() - confirm against that
     * helper.
     */
    @Test(timeout=30000) @SuppressWarnings("rawtypes") public void testAppSubmit() throws Exception { YarnScheduler yarnScheduler=mockYarnScheduler(); RMContext rmContext=mock(RMContext.class); mockRMContext(yarnScheduler,rmContext); RMStateStore stateStore=mock(RMStateStore.class); when(rmContext.getStateStore()).thenReturn(stateStore); RMAppManager appManager=new RMAppManager(rmContext,yarnScheduler,null,mock(ApplicationACLsManager.class),new Configuration()); when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){ public void handle( Event event){ } } ); ApplicationId appId1=getApplicationId(100); ApplicationACLsManager mockAclsManager=mock(ApplicationACLsManager.class); when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),ApplicationAccessType.VIEW_APP,null,appId1)).thenReturn(true); QueueACLsManager mockQueueACLsManager=mock(QueueACLsManager.class); when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),any(QueueACL.class),anyString())).thenReturn(true); ClientRMService rmService=new ClientRMService(rmContext,yarnScheduler,appManager,mockAclsManager,mockQueueACLsManager,null); SubmitApplicationRequest submitRequest1=mockSubmitAppRequest(appId1,null,null); try { rmService.submitApplication(submitRequest1); } catch ( YarnException e) { Assert.fail("Exception is not expected."); } RMApp app1=rmContext.getRMApps().get(appId1); Assert.assertNotNull("app doesn't exist",app1); Assert.assertEquals("app name doesn't match",YarnConfiguration.DEFAULT_APPLICATION_NAME,app1.getName()); Assert.assertEquals("app queue doesn't match",YarnConfiguration.DEFAULT_QUEUE_NAME,app1.getQueue()); String name=MockApps.newAppName(); String queue=MockApps.newQueue(); ApplicationId appId2=getApplicationId(101); SubmitApplicationRequest submitRequest2=mockSubmitAppRequest(appId2,name,queue); submitRequest2.getApplicationSubmissionContext().setApplicationType("matchType"); try { rmService.submitApplication(submitRequest2); } catch ( 
YarnException e) { Assert.fail("Exception is not expected."); } RMApp app2=rmContext.getRMApps().get(appId2); Assert.assertNotNull("app doesn't exist",app2); Assert.assertEquals("app name doesn't match",name,app2.getName()); Assert.assertEquals("app queue doesn't match",queue,app2.getQueue()); try { rmService.submitApplication(submitRequest2); } catch ( YarnException e) { Assert.fail("Exception is not expected."); } GetApplicationsRequest getAllAppsRequest=GetApplicationsRequest.newInstance(new HashSet()); GetApplicationsResponse getAllApplicationsResponse=rmService.getApplications(getAllAppsRequest); Assert.assertEquals(5,getAllApplicationsResponse.getApplicationList().size()); Set appTypes=new HashSet(); appTypes.add("matchType"); getAllAppsRequest=GetApplicationsRequest.newInstance(appTypes); getAllApplicationsResponse=rmService.getApplications(getAllAppsRequest); Assert.assertEquals(1,getAllApplicationsResponse.getApplicationList().size()); Assert.assertEquals(appId2,getAllApplicationsResponse.getApplicationList().get(0).getApplicationId()); }

    APIUtilityVerifier InternalCallVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * getClusterNodes() state filtering over a real RPC proxy: with one
     * RUNNING node and one LOST node, a RUNNING-only query must exclude the
     * lost node; after an unhealthy heartbeat the node drops out of the
     * default view but shows up under an UNHEALTHY filter; and an
     * allOf(NodeState) query must return every registered node, including a
     * newly registered one that has not started yet.
     */
    @Test public void testGetClusterNodes() throws Exception { MockRM rm=new MockRM(){ protected ClientRMService createClientRMService(){ return new ClientRMService(this.rmContext,scheduler,this.rmAppManager,this.applicationACLsManager,this.queueACLsManager,this.getRMContext().getRMDelegationTokenSecretManager()); } } ; rm.start(); MockNM node=rm.registerNode("host1:1234",1024); rm.sendNodeStarted(node); node.nodeHeartbeat(true); MockNM lostNode=rm.registerNode("host2:1235",1024); rm.sendNodeStarted(lostNode); lostNode.nodeHeartbeat(true); rm.NMwaitForState(lostNode.getNodeId(),NodeState.RUNNING); rm.sendNodeLost(lostNode); Configuration conf=new Configuration(); YarnRPC rpc=YarnRPC.create(conf); InetSocketAddress rmAddress=rm.getClientRMService().getBindAddress(); LOG.info("Connecting to ResourceManager at " + rmAddress); ApplicationClientProtocol client=(ApplicationClientProtocol)rpc.getProxy(ApplicationClientProtocol.class,rmAddress,conf); GetClusterNodesRequest request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.RUNNING)); List nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals(1,nodeReports.size()); Assert.assertNotSame("Node is expected to be healthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState()); node.nodeHeartbeat(false); nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals("Unhealthy nodes should not show up by default",0,nodeReports.size()); request=GetClusterNodesRequest.newInstance(EnumSet.of(NodeState.UNHEALTHY)); nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals(1,nodeReports.size()); Assert.assertEquals("Node is expected to be unhealthy!",NodeState.UNHEALTHY,nodeReports.get(0).getNodeState()); rm.registerNode("host3:1236",1024); request=GetClusterNodesRequest.newInstance(EnumSet.allOf(NodeState.class)); nodeReports=client.getClusterNodes(request).getNodeReports(); Assert.assertEquals(3,nodeReports.size()); }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * getApplicationAttemptReport() for a known attempt must echo back the
     * requested ApplicationAttemptId; an ApplicationNotFoundException is
     * treated as a test failure.
     */
    @Test public void testGetApplicationAttemptReport() throws YarnException, IOException {
      ClientRMService service=createRMService();
      RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
      GetApplicationAttemptReportRequest reportRequest=factory.newRecordInstance(GetApplicationAttemptReportRequest.class);
      ApplicationAttemptId requestedAttempt=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1);
      reportRequest.setApplicationAttemptId(requestedAttempt);
      try {
        GetApplicationAttemptReportResponse reply=service.getApplicationAttemptReport(reportRequest);
        Assert.assertEquals(requestedAttempt,reply.getApplicationAttemptReport().getApplicationAttemptId());
      } catch ( ApplicationNotFoundException ex) {
        Assert.fail(ex.getMessage());
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * getApplicationAttempts() for a known application must return exactly
     * one attempt whose id matches the expected ApplicationAttemptId; an
     * ApplicationNotFoundException is treated as a test failure.
     */
    @Test public void testGetApplicationAttempts() throws YarnException, IOException {
      ClientRMService service=createRMService();
      RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
      GetApplicationAttemptsRequest attemptsRequest=factory.newRecordInstance(GetApplicationAttemptsRequest.class);
      ApplicationAttemptId expectedAttempt=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1);
      attemptsRequest.setApplicationId(ApplicationId.newInstance(123456,1));
      try {
        GetApplicationAttemptsResponse reply=service.getApplicationAttempts(attemptsRequest);
        Assert.assertEquals(1,reply.getApplicationAttemptList().size());
        Assert.assertEquals(expectedAttempt,reply.getApplicationAttemptList().get(0).getApplicationAttemptId());
      } catch ( ApplicationNotFoundException ex) {
        Assert.fail(ex.getMessage());
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Broad getApplications() filter coverage over three submitted apps
     * spread across two queues with nested tag sets: limit, start-time
     * ranges, queue set, user set, tag sets of decreasing breadth, and
     * VIEWABLE vs OWN request scopes. NOTE(review): the expected totals of 6
     * presumably include apps pre-seeded by mockRMContext() - confirm. The
     * start-range expectations (2/1/0) rely on submitTimeMillis[i] being
     * captured after each submit, i.e. on millisecond-distinct submit times.
     */
    @Test public void testGetApplications() throws IOException, YarnException { YarnScheduler yarnScheduler=mockYarnScheduler(); RMContext rmContext=mock(RMContext.class); mockRMContext(yarnScheduler,rmContext); RMStateStore stateStore=mock(RMStateStore.class); when(rmContext.getStateStore()).thenReturn(stateStore); RMAppManager appManager=new RMAppManager(rmContext,yarnScheduler,null,mock(ApplicationACLsManager.class),new Configuration()); when(rmContext.getDispatcher().getEventHandler()).thenReturn(new EventHandler(){ public void handle( Event event){ } } ); ApplicationACLsManager mockAclsManager=mock(ApplicationACLsManager.class); QueueACLsManager mockQueueACLsManager=mock(QueueACLsManager.class); when(mockQueueACLsManager.checkAccess(any(UserGroupInformation.class),any(QueueACL.class),anyString())).thenReturn(true); ClientRMService rmService=new ClientRMService(rmContext,yarnScheduler,appManager,mockAclsManager,mockQueueACLsManager,null); String[] queues={QUEUE_1,QUEUE_2}; String[] appNames={MockApps.newAppName(),MockApps.newAppName(),MockApps.newAppName()}; ApplicationId[] appIds={getApplicationId(101),getApplicationId(102),getApplicationId(103)}; List tags=Arrays.asList("Tag1","Tag2","Tag3"); long[] submitTimeMillis=new long[3]; for (int i=0; i < appIds.length; i++) { ApplicationId appId=appIds[i]; when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),ApplicationAccessType.VIEW_APP,null,appId)).thenReturn(true); SubmitApplicationRequest submitRequest=mockSubmitAppRequest(appId,appNames[i],queues[i % queues.length],new HashSet(tags.subList(0,i + 1))); rmService.submitApplication(submitRequest); submitTimeMillis[i]=System.currentTimeMillis(); } GetApplicationsRequest request=GetApplicationsRequest.newInstance(); assertEquals("Incorrect total number of apps",6,rmService.getApplications(request).getApplicationList().size()); request.setLimit(1L); assertEquals("Failed to limit 
applications",1,rmService.getApplications(request).getApplicationList().size()); request=GetApplicationsRequest.newInstance(); request.setStartRange(submitTimeMillis[0],System.currentTimeMillis()); assertEquals("Incorrect number of matching start range",2,rmService.getApplications(request).getApplicationList().size()); request.setStartRange(submitTimeMillis[1],System.currentTimeMillis()); assertEquals("Incorrect number of matching start range",1,rmService.getApplications(request).getApplicationList().size()); request.setStartRange(submitTimeMillis[2],System.currentTimeMillis()); assertEquals("Incorrect number of matching start range",0,rmService.getApplications(request).getApplicationList().size()); request=GetApplicationsRequest.newInstance(); Set queueSet=new HashSet(); request.setQueues(queueSet); queueSet.add(queues[0]); assertEquals("Incorrect number of applications in queue",2,rmService.getApplications(request).getApplicationList().size()); assertEquals("Incorrect number of applications in queue",2,rmService.getApplications(request,false).getApplicationList().size()); queueSet.add(queues[1]); assertEquals("Incorrect number of applications in queue",3,rmService.getApplications(request).getApplicationList().size()); request=GetApplicationsRequest.newInstance(); Set userSet=new HashSet(); request.setUsers(userSet); userSet.add("random-user-name"); assertEquals("Incorrect number of applications for user",0,rmService.getApplications(request).getApplicationList().size()); userSet.add(UserGroupInformation.getCurrentUser().getShortUserName()); assertEquals("Incorrect number of applications for user",3,rmService.getApplications(request).getApplicationList().size()); request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.ALL,null,null,null,null,null,null,null,null); Set tagSet=new HashSet(); request.setApplicationTags(tagSet); assertEquals("Incorrect number of matching tags",6,rmService.getApplications(request).getApplicationList().size()); 
tagSet=Sets.newHashSet(tags.get(0)); request.setApplicationTags(tagSet); assertEquals("Incorrect number of matching tags",3,rmService.getApplications(request).getApplicationList().size()); tagSet=Sets.newHashSet(tags.get(1)); request.setApplicationTags(tagSet); assertEquals("Incorrect number of matching tags",2,rmService.getApplications(request).getApplicationList().size()); tagSet=Sets.newHashSet(tags.get(2)); request.setApplicationTags(tagSet); assertEquals("Incorrect number of matching tags",1,rmService.getApplications(request).getApplicationList().size()); request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.VIEWABLE); assertEquals("Incorrect number of applications for the scope",6,rmService.getApplications(request).getApplicationList().size()); request=GetApplicationsRequest.newInstance(ApplicationsRequestScope.OWN); assertEquals("Incorrect number of applications for the scope",3,rmService.getApplications(request).getApplicationList().size()); }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * getContainers() for a known attempt must list the expected container
     * first; an ApplicationNotFoundException is treated as a test failure.
     */
    @Test public void testGetContainers() throws YarnException, IOException {
      ClientRMService service=createRMService();
      RecordFactory factory=RecordFactoryProvider.getRecordFactory(null);
      GetContainersRequest containersRequest=factory.newRecordInstance(GetContainersRequest.class);
      ApplicationAttemptId requestedAttempt=ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456,1),1);
      ContainerId expectedContainer=ContainerId.newInstance(requestedAttempt,1);
      containersRequest.setApplicationAttemptId(requestedAttempt);
      try {
        GetContainersResponse reply=service.getContainers(containersRequest);
        Assert.assertEquals(expectedContainer,reply.getContainerList().get(0).getContainerId());
      } catch ( ApplicationNotFoundException ex) {
        Assert.fail(ex.getMessage());
      }
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestClientRMTokens

    UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Full RM delegation-token lifecycle against a short-lived secret
     * manager (10s renew interval, 20s max lifetime): a fresh token
     * authenticates getNewApplication(); renewal extends validity past the
     * initial interval; after the renew window elapses the token must be
     * rejected with InvalidToken ("is expired"); and a freshly issued then
     * cancelled token must also be rejected. Wall-clock sleeps pace each
     * phase; proxies are stopped and the secret-manager threads shut down in
     * the finally block.
     */
    @Test public void testDelegationToken() throws IOException, InterruptedException { final YarnConfiguration conf=new YarnConfiguration(); conf.set(YarnConfiguration.RM_PRINCIPAL,"testuser/localhost@apache.org"); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); ResourceScheduler scheduler=createMockScheduler(conf); long initialInterval=10000l; long maxLifetime=20000l; long renewInterval=10000l; RMDelegationTokenSecretManager rmDtSecretManager=createRMDelegationTokenSecretManager(initialInterval,maxLifetime,renewInterval); rmDtSecretManager.startThreads(); LOG.info("Creating DelegationTokenSecretManager with initialInterval: " + initialInterval + ", maxLifetime: "+ maxLifetime+ ", renewInterval: "+ renewInterval); final ClientRMService clientRMService=new ClientRMServiceForTest(conf,scheduler,rmDtSecretManager); clientRMService.init(conf); clientRMService.start(); ApplicationClientProtocol clientRMWithDT=null; try { UserGroupInformation loggedInUser=UserGroupInformation.createRemoteUser("testrenewer@APACHE.ORG"); Assert.assertEquals("testrenewer",loggedInUser.getShortUserName()); loggedInUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS); org.apache.hadoop.yarn.api.records.Token token=getDelegationToken(loggedInUser,clientRMService,loggedInUser.getShortUserName()); long tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser1",conf); GetNewApplicationRequest request=Records.newRecord(GetNewApplicationRequest.class); try { clientRMWithDT.getNewApplication(request); } catch ( IOException e) { fail("Unexpected exception" + e); } catch ( YarnException e) { fail("Unexpected exception" + e); } while (System.currentTimeMillis() < tokenFetchTime + initialInterval / 2) { Thread.sleep(500l); } long 
nextExpTime=renewDelegationToken(loggedInUser,clientRMService,token); long renewalTime=System.currentTimeMillis(); LOG.info("Renewed token at: " + renewalTime + ", NextExpiryTime: "+ nextExpTime); while (System.currentTimeMillis() > tokenFetchTime + initialInterval && System.currentTimeMillis() < nextExpTime) { Thread.sleep(500l); } Thread.sleep(50l); try { clientRMWithDT.getNewApplication(request); } catch ( IOException e) { fail("Unexpected exception" + e); } catch ( YarnException e) { fail("Unexpected exception" + e); } while (System.currentTimeMillis() < renewalTime + renewInterval) { Thread.sleep(500l); } Thread.sleep(50l); LOG.info("At time: " + System.currentTimeMillis() + ", token should be invalid"); try { clientRMWithDT.getNewApplication(request); fail("Should not have succeeded with an expired token"); } catch ( Exception e) { assertEquals(InvalidToken.class.getName(),e.getClass().getName()); assertTrue(e.getMessage().contains("is expired")); } if (clientRMWithDT != null) { RPC.stopProxy(clientRMWithDT); clientRMWithDT=null; } token=getDelegationToken(loggedInUser,clientRMService,loggedInUser.getShortUserName()); tokenFetchTime=System.currentTimeMillis(); LOG.info("Got delegation token at: " + tokenFetchTime); clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser2",conf); request=Records.newRecord(GetNewApplicationRequest.class); try { clientRMWithDT.getNewApplication(request); } catch ( IOException e) { fail("Unexpected exception" + e); } catch ( YarnException e) { fail("Unexpected exception" + e); } cancelDelegationToken(loggedInUser,clientRMService,token); if (clientRMWithDT != null) { RPC.stopProxy(clientRMWithDT); clientRMWithDT=null; } clientRMWithDT=getClientRMProtocolWithDT(token,clientRMService.getBindAddress(),"loginuser2",conf); LOG.info("Cancelled delegation token at: " + System.currentTimeMillis()); try { clientRMWithDT.getNewApplication(request); fail("Should not have succeeded with a cancelled 
delegation token"); } catch ( IOException e) { } catch ( YarnException e) { } } finally { rmDtSecretManager.stopThreads(); if (clientRMWithDT != null) { RPC.stopProxy(clientRMWithDT); } } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestFifoScheduler

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * FifoScheduler blacklist handling: four nodes across two racks are
     * registered; the app first blacklists host_1_0 (n3), so node-update
     * events for n3 yield no allocation while n4 (same rack) satisfies the
     * rack-local request. The app then additionally blacklists "rack0", so a
     * subsequent ANY request must skip n1, n2 and n3 and again be satisfied
     * only by n4 - asserted per-allocation and per-container node id.
     */
    @Test(timeout=50000) public void testBlackListNodes() throws Exception { Configuration conf=new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class); MockRM rm=new MockRM(conf); rm.start(); FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler(); int rack_num_0=0; int rack_num_1=1; String host_0_0="127.0.0.1"; RMNode n1=MockNodes.newNodeInfo(rack_num_0,MockNodes.newResource(4 * GB),1,host_0_0); fs.handle(new NodeAddedSchedulerEvent(n1)); String host_0_1="127.0.0.2"; RMNode n2=MockNodes.newNodeInfo(rack_num_0,MockNodes.newResource(4 * GB),1,host_0_1); fs.handle(new NodeAddedSchedulerEvent(n2)); String host_1_0="127.0.0.3"; RMNode n3=MockNodes.newNodeInfo(rack_num_1,MockNodes.newResource(4 * GB),1,host_1_0); fs.handle(new NodeAddedSchedulerEvent(n3)); String host_1_1="127.0.0.4"; RMNode n4=MockNodes.newNodeInfo(rack_num_1,MockNodes.newResource(4 * GB),1,host_1_1); fs.handle(new NodeAddedSchedulerEvent(n4)); ApplicationId appId1=BuilderUtils.newApplicationId(100,1); ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(appId1,1); SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId1,"queue","user"); fs.handle(appEvent); SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId1,false); fs.handle(attemptEvent); List emptyId=new ArrayList(); List emptyAsk=new ArrayList(); List ask1=new ArrayList(); ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),"rack1",BuilderUtils.newResource(GB,1),1)); ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1)); fs.allocate(appAttemptId1,ask1,emptyId,Collections.singletonList(host_1_0),null); fs.handle(new NodeUpdateSchedulerEvent(n3)); Allocation allocation1=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("allocation1",0,allocation1.getContainers().size()); fs.handle(new NodeUpdateSchedulerEvent(n4)); Allocation 
allocation2=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("allocation2",1,allocation2.getContainers().size()); List containerList=allocation2.getContainers(); for ( Container container : containerList) { Assert.assertEquals("Container is allocated on n4",container.getNodeId(),n4.getNodeID()); } List ask2=new ArrayList(); ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1)); fs.allocate(appAttemptId1,ask2,emptyId,Collections.singletonList("rack0"),null); fs.handle(new NodeUpdateSchedulerEvent(n1)); Allocation allocation3=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("allocation3",0,allocation3.getContainers().size()); fs.handle(new NodeUpdateSchedulerEvent(n2)); Allocation allocation4=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("allocation4",0,allocation4.getContainers().size()); fs.handle(new NodeUpdateSchedulerEvent(n3)); Allocation allocation5=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("allocation5",0,allocation5.getContainers().size()); fs.handle(new NodeUpdateSchedulerEvent(n4)); Allocation allocation6=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("allocation6",1,allocation6.getContainers().size()); containerList=allocation6.getContainers(); for ( Container container : containerList) { Assert.assertEquals("Container is allocated on n4",container.getNodeId(),n4.getNodeID()); } rm.stop(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * End-to-end scheduling over two NMs (6GB and 4GB). Two apps (2GB AM
     * each) register; app1 asks for 1GB on both hosts, app2 asks for 3GB.
     * After heartbeats, app1's container (1GB) and app2's container (3GB)
     * both land on nm1, filling it completely (0 available / 6GB used),
     * while nm2 still holds only app2's AM (2GB used, 2GB available).
     * Completing app1's container via a COMPLETE container status brings
     * nm1's usage down to 5GB and surfaces one completed-container status
     * through the next allocate call.
     * Note: the polling loops before the second nm2.nodeHeartbeat have no
     * iteration bound beyond the overall JUnit runner — TODO confirm the
     * suite applies a global timeout to this test.
     */
    @Test public void test() throws Exception { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MockRM rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("127.0.0.1:1234",6 * GB); MockNM nm2=rm.registerNode("127.0.0.2:5678",4 * GB); RMApp app1=rm.submitApp(2048); nm1.nodeHeartbeat(true); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); SchedulerNodeReport report_nm1=rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); Assert.assertEquals(2 * GB,report_nm1.getUsedResource().getMemory()); RMApp app2=rm.submitApp(2048); nm2.nodeHeartbeat(true); RMAppAttempt attempt2=app2.getCurrentAppAttempt(); MockAM am2=rm.sendAMLaunched(attempt2.getAppAttemptId()); am2.registerAppAttempt(); SchedulerNodeReport report_nm2=rm.getResourceScheduler().getNodeReport(nm2.getNodeId()); Assert.assertEquals(2 * GB,report_nm2.getUsedResource().getMemory()); am1.addRequests(new String[]{"127.0.0.1","127.0.0.2"},GB,1,1); AllocateResponse alloc1Response=am1.schedule(); am2.addRequests(new String[]{"127.0.0.1","127.0.0.2"},3 * GB,0,1); AllocateResponse alloc2Response=am2.schedule(); nm1.nodeHeartbeat(true); while (alloc1Response.getAllocatedContainers().size() < 1) { LOG.info("Waiting for containers to be created for app 1..."); Thread.sleep(1000); alloc1Response=am1.schedule(); } while (alloc2Response.getAllocatedContainers().size() < 1) { LOG.info("Waiting for containers to be created for app 2..."); Thread.sleep(1000); alloc2Response=am2.schedule(); } nm2.nodeHeartbeat(true); List allocated1=alloc1Response.getAllocatedContainers(); Assert.assertEquals(1,allocated1.size()); Assert.assertEquals(1 * GB,allocated1.get(0).getResource().getMemory()); Assert.assertEquals(nm1.getNodeId(),allocated1.get(0).getNodeId()); List allocated2=alloc2Response.getAllocatedContainers(); Assert.assertEquals(1,allocated2.size()); Assert.assertEquals(3 * 
GB,allocated2.get(0).getResource().getMemory()); Assert.assertEquals(nm1.getNodeId(),allocated2.get(0).getNodeId()); report_nm1=rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); report_nm2=rm.getResourceScheduler().getNodeReport(nm2.getNodeId()); Assert.assertEquals(0,report_nm1.getAvailableResource().getMemory()); Assert.assertEquals(2 * GB,report_nm2.getAvailableResource().getMemory()); Assert.assertEquals(6 * GB,report_nm1.getUsedResource().getMemory()); Assert.assertEquals(2 * GB,report_nm2.getUsedResource().getMemory()); Container c1=allocated1.get(0); Assert.assertEquals(GB,c1.getResource().getMemory()); ContainerStatus containerStatus=BuilderUtils.newContainerStatus(c1.getId(),ContainerState.COMPLETE,"",0); nm1.containerStatus(containerStatus); int waitCount=0; while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) { LOG.info("Waiting for containers to be finished for app 1... Tried " + waitCount + " times already.."); Thread.sleep(1000); } Assert.assertEquals(1,attempt1.getJustFinishedContainers().size()); Assert.assertEquals(1,am1.schedule().getCompletedContainersStatuses().size()); report_nm1=rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); Assert.assertEquals(5 * GB,report_nm1.getUsedResource().getMemory()); rm.stop(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=50000) public void testHeadroom() throws Exception { Configuration conf=new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,FifoScheduler.class,ResourceScheduler.class); MockRM rm=new MockRM(conf); rm.start(); FifoScheduler fs=(FifoScheduler)rm.getResourceScheduler(); RMNode n1=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,"127.0.0.2"); fs.handle(new NodeAddedSchedulerEvent(n1)); ApplicationId appId1=BuilderUtils.newApplicationId(100,1); ApplicationAttemptId appAttemptId1=BuilderUtils.newApplicationAttemptId(appId1,1); SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId1,"queue","user"); fs.handle(appEvent); SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId1,false); fs.handle(attemptEvent); ApplicationId appId2=BuilderUtils.newApplicationId(200,2); ApplicationAttemptId appAttemptId2=BuilderUtils.newApplicationAttemptId(appId2,1); SchedulerEvent appEvent2=new AppAddedSchedulerEvent(appId2,"queue","user"); fs.handle(appEvent2); SchedulerEvent attemptEvent2=new AppAttemptAddedSchedulerEvent(appAttemptId2,false); fs.handle(attemptEvent2); List emptyId=new ArrayList(); List emptyAsk=new ArrayList(); List ask1=new ArrayList(); ask1.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(GB,1),1)); fs.allocate(appAttemptId1,ask1,emptyId,null,null); List ask2=new ArrayList(); ask2.add(BuilderUtils.newResourceRequest(BuilderUtils.newPriority(0),ResourceRequest.ANY,BuilderUtils.newResource(2 * GB,1),1)); fs.allocate(appAttemptId2,ask2,emptyId,null,null); fs.handle(new NodeUpdateSchedulerEvent(n1)); Allocation allocation1=fs.allocate(appAttemptId1,emptyAsk,emptyId,null,null); Assert.assertEquals("Allocation headroom",1 * GB,allocation1.getResourceLimit().getMemory()); Allocation allocation2=fs.allocate(appAttemptId2,emptyAsk,emptyId,null,null); Assert.assertEquals("Allocation headroom",1 * GB,allocation2.getResourceLimit().getMemory()); rm.stop(); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=50000) public void testReconnectedNode() throws Exception { CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration(); conf.setQueues("default",new String[]{"default"}); conf.setCapacity("default",100); FifoScheduler fs=new FifoScheduler(); fs.init(conf); fs.start(); RMContext context=mock(RMContext.class); fs.reinitialize(conf,null); fs.setRMContext(context); RMNode n1=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1,"127.0.0.2"); RMNode n2=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),2,"127.0.0.3"); fs.handle(new NodeAddedSchedulerEvent(n1)); fs.handle(new NodeAddedSchedulerEvent(n2)); fs.handle(new NodeUpdateSchedulerEvent(n1)); Assert.assertEquals(6 * GB,fs.getRootQueueMetrics().getAvailableMB()); n1=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),1,"127.0.0.2"); fs.handle(new NodeRemovedSchedulerEvent(n1)); fs.handle(new NodeAddedSchedulerEvent(n1)); fs.handle(new NodeUpdateSchedulerEvent(n1)); Assert.assertEquals(4 * GB,fs.getRootQueueMetrics().getAvailableMB()); fs.stop(); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestKillApplicationWithRMHA

    EqualityVerifier 
    @Test(timeout=20000) public void testKillAppWhenFailOverHappensDuringApplicationKill() throws Exception { startRMsWithCustomizedClientRMService(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200); MockAM am0=launchAM(app0,rm1,nm1); Assert.assertEquals(app0.getState(),RMAppState.RUNNING); rm1.killApp(app0.getApplicationId()); failOverAndKillApp(app0.getApplicationId(),am0.getApplicationAttemptId(),RMAppState.RUNNING,RMAppAttemptState.RUNNING,RMAppState.ACCEPTED); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestMoveApplication

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) public void testMoveTooLate() throws Exception { Application application=new Application("user1",resourceManager); ApplicationId appId=application.getApplicationId(); application.submit(); ClientRMService clientRMService=resourceManager.getClientRMService(); clientRMService.forceKillApplication(KillApplicationRequest.newInstance(appId)); RMApp rmApp=resourceManager.getRMContext().getRMApps().get(appId); while (rmApp.getState() != RMAppState.KILLED) { Thread.sleep(100); } try { clientRMService.moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest.newInstance(appId,"newqueue")); fail("Should have hit exception"); } catch ( YarnException ex) { assertEquals(YarnException.class,ex.getClass()); assertEquals("App in KILLED state cannot be moved.",ex.getMessage()); } }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveRejectedByScheduler() throws Exception { failMove=true; Application application=new Application("user1",resourceManager); application.submit(); RMApp app=resourceManager.rmContext.getRMApps().get(application.getApplicationId()); while (app.getState() != RMAppState.ACCEPTED) { Thread.sleep(100); } ClientRMService clientRMService=resourceManager.getClientRMService(); try { clientRMService.moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest.newInstance(application.getApplicationId(),"newqueue")); fail("Should have hit exception"); } catch ( YarnException ex) { assertEquals("Move not supported",ex.getCause().getMessage()); } }

    UtilityVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveRejectedByPermissions() throws Exception { failMove=true; final Application application=new Application("user1",resourceManager); application.submit(); final ClientRMService clientRMService=resourceManager.getClientRMService(); try { UserGroupInformation.createRemoteUser("otheruser").doAs(new PrivilegedExceptionAction(){ @Override public MoveApplicationAcrossQueuesResponse run() throws Exception { return clientRMService.moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest.newInstance(application.getApplicationId(),"newqueue")); } } ); fail("Should have hit exception"); } catch ( Exception ex) { assertEquals(AccessControlException.class,ex.getCause().getCause().getClass()); } }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=10000) public void testMoveSuccessful() throws Exception { MockRM rm1=new MockRM(conf); rm1.start(); RMApp app=rm1.submitApp(1024); ClientRMService clientRMService=rm1.getClientRMService(); clientRMService.moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest.newInstance(app.getApplicationId(),"newqueue")); RMApp rmApp=rm1.getRMContext().getRMApps().get(app.getApplicationId()); assertEquals("newqueue",rmApp.getQueue()); rm1.stop(); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestRM

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Mechanism: the anonymous AsyncDispatcher wraps its event handler in a
    // Mockito spy that silently drops RMAppAttemptEvents of type KILL. With
    // attempt-kill events swallowed, forceKillApplication leaves the app stuck
    // in KILLING while the attempt keeps RUNNING; the test then injects an
    // ATTEMPT_KILLED app event by hand to complete the transition to KILLED,
    // and finally checks the root queue metrics counted one extra submitted
    // and one extra killed application relative to the pre-test snapshot.
    /** * Validate killing an application when it is at accepted state. * @throws Exception exception */ @Test(timeout=60000) public void testApplicationKillAtAcceptedState() throws Exception { YarnConfiguration conf=new YarnConfiguration(); final Dispatcher dispatcher=new AsyncDispatcher(){ @Override public EventHandler getEventHandler(){ class EventArgMatcher extends ArgumentMatcher { @Override public boolean matches( Object argument){ if (argument instanceof RMAppAttemptEvent) { if (((RMAppAttemptEvent)argument).getType().equals(RMAppAttemptEventType.KILL)) { return true; } } return false; } } EventHandler handler=spy(super.getEventHandler()); doNothing().when(handler).handle(argThat(new EventArgMatcher())); return handler; } } ; MockRM rm=new MockRM(conf){ @Override protected Dispatcher createDispatcher(){ return dispatcher; } } ; QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics(); int appsKilled=metrics.getAppsKilled(); int appsSubmitted=metrics.getAppsSubmitted(); rm.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm.getResourceTrackerService()); nm1.registerNode(); RMApp application=rm.submitApp(200); MockAM am=MockRM.launchAM(application,rm,nm1); am.waitForState(RMAppAttemptState.LAUNCHED); nm1.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.RUNNING); rm.waitForState(application.getApplicationId(),RMAppState.ACCEPTED); KillApplicationRequest request=KillApplicationRequest.newInstance(application.getApplicationId()); rm.getClientRMService().forceKillApplication(request); am.registerAppAttempt(false); rm.waitForState(application.getApplicationId(),RMAppState.KILLING); rm.waitForState(am.getApplicationAttemptId(),RMAppAttemptState.RUNNING); rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_KILLED)); rm.waitForState(application.getApplicationId(),RMAppState.KILLED); metrics=rm.getResourceScheduler().getRootQueueMetrics(); 
Assert.assertEquals(appsKilled + 1,metrics.getAppsKilled()); Assert.assertEquals(appsSubmitted + 1,metrics.getAppsSubmitted()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=60000) public void testInvalidatedAMHostPortOnAMRestart() throws Exception { YarnConfiguration conf=new YarnConfiguration(); MockRM rm1=new MockRM(conf); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app2=rm1.submitApp(200); MockAM am2=MockRM.launchAndRegisterAM(app2,rm1,nm1); nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE); am2.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED); GetApplicationReportRequest request1=GetApplicationReportRequest.newInstance(app2.getApplicationId()); ApplicationReport report1=rm1.getClientRMService().getApplicationReport(request1).getApplicationReport(); Assert.assertEquals("N/A",report1.getHost()); Assert.assertEquals(-1,report1.getRpcPort()); }

    APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=80000) public void testInvalidateAMHostPortWhenAMFailedOrKilled() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MockRM rm1=new MockRM(conf); rm1.start(); RMApp app1=rm1.submitApp(200); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am1); RMApp app2=rm1.submitApp(200); MockAM am2=MockRM.launchAndRegisterAM(app2,rm1,nm1); nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE); am2.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app2.getApplicationId(),RMAppState.FAILED); RMApp app3=rm1.submitApp(200); MockAM am3=MockRM.launchAndRegisterAM(app3,rm1,nm1); rm1.killApp(app3.getApplicationId()); rm1.waitForState(app3.getApplicationId(),RMAppState.KILLED); rm1.waitForState(am3.getApplicationAttemptId(),RMAppAttemptState.KILLED); GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED)); GetApplicationsResponse response1=rm1.getClientRMService().getApplications(request1); List appList1=response1.getApplicationList(); Assert.assertEquals(3,appList1.size()); for ( ApplicationReport report : appList1) { if (report.getApplicationId().equals(app2.getApplicationId()) || report.getApplicationId().equals(app3.getApplicationId())) { Assert.assertEquals("N/A",report.getHost()); Assert.assertEquals(-1,report.getRpcPort()); } if (report.getApplicationId().equals(app1.getApplicationId())) { Assert.assertFalse(report.getHost().equals("N/A")); Assert.assertTrue(report.getRpcPort() != -1); } } }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testAppOnMultiNode() throws Exception { Logger rootLogger=LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); YarnConfiguration conf=new YarnConfiguration(); conf.set("yarn.scheduler.capacity.node-locality-delay","-1"); MockRM rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("h1:1234",5120); MockNM nm2=rm.registerNode("h2:5678",10240); RMApp app=rm.submitApp(2000); nm1.nodeHeartbeat(true); RMAppAttempt attempt=app.getCurrentAppAttempt(); MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId()); am.registerAppAttempt(); int request=13; am.allocate("h1",1000,request,new ArrayList()); List conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); int contReceived=conts.size(); while (contReceived < 3) { nm1.nodeHeartbeat(true); conts.addAll(am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); contReceived=conts.size(); LOG.info("Got " + contReceived + " containers. Waiting to get "+ 3); Thread.sleep(WAIT_SLEEP_MS); } Assert.assertEquals(3,conts.size()); conts=am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); contReceived=conts.size(); while (contReceived < 10) { nm2.nodeHeartbeat(true); conts.addAll(am.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); contReceived=conts.size(); LOG.info("Got " + contReceived + " containers. Waiting to get "+ 10); Thread.sleep(WAIT_SLEEP_MS); } Assert.assertEquals(10,conts.size()); am.unregisterAppAttempt(); nm1.nodeHeartbeat(attempt.getAppAttemptId(),1,ContainerState.COMPLETE); am.waitForState(RMAppAttemptState.FINISHED); rm.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=20000) public void testNMTokenSentForNormalContainer() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.set(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class.getCanonicalName()); MockRM rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("h1:1234",5120); RMApp app=rm.submitApp(2000); RMAppAttempt attempt=app.getCurrentAppAttempt(); CapacityScheduler cs=(CapacityScheduler)rm.getResourceScheduler(); cs.getApplicationAttempt(attempt.getAppAttemptId()).getNewContainerId(); nm1.nodeHeartbeat(true); MockAM am=MockRM.launchAM(app,rm,nm1); Assert.assertTrue(attempt.getMasterContainer().getId().getId() != 1); Assert.assertFalse(rm.getRMContext().getNMTokenSecretManager().isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId())); am.registerAppAttempt(); rm.waitForState(app.getApplicationId(),RMAppState.RUNNING); int NUM_CONTAINERS=1; List containers=new ArrayList(); List expectedNMTokens=new ArrayList(); while (true) { AllocateResponse response=am.allocate("127.0.0.1",2000,NUM_CONTAINERS,new ArrayList()); nm1.nodeHeartbeat(true); containers.addAll(response.getAllocatedContainers()); expectedNMTokens.addAll(response.getNMTokens()); if (containers.size() == NUM_CONTAINERS) { break; } Thread.sleep(200); System.out.println("Waiting for container to be allocated."); } NodeId nodeId=expectedNMTokens.get(0).getNodeId(); Assert.assertEquals(nm1.getNodeId(),nodeId); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * NMToken lifecycle end-to-end: (1) a token is issued once per node —
     * repeated allocations on nm1 keep nmTokens.size() at 1; (2) allocating
     * on a second node nm2 adds a second token; (3) re-registering nm2
     * (simulated restart) clears its token, after which allocating on nm2
     * again re-issues it; (4) rolling and activating a new NMToken master
     * key wipes all per-node tokens while keeping the attempt registered,
     * and a fresh allocation re-issues a token for the node involved;
     * (5) after the AM unregisters and every container plus the AM container
     * is reported COMPLETE, the attempt is no longer registered with the
     * NMTokenSecretManager.
     */
    @Test(timeout=40000) public void testNMToken() throws Exception { MockRM rm=new MockRM(); try { rm.start(); MockNM nm1=rm.registerNode("h1:1234",10000); NMTokenSecretManagerInRM nmTokenSecretManager=rm.getRMContext().getNMTokenSecretManager(); RMApp app=rm.submitApp(1000); nm1.nodeHeartbeat(true); RMAppAttempt attempt=app.getCurrentAppAttempt(); MockAM am=rm.sendAMLaunched(attempt.getAppAttemptId()); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); am.registerAppAttempt(); ArrayList containersReceivedForNM1=new ArrayList(); List releaseContainerList=new ArrayList(); HashMap nmTokens=new HashMap(); AllocateResponse response=am.allocate("h1",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM1,2,nmTokens,nm1); Assert.assertEquals(1,nmTokens.size()); response=am.allocate("h1",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM1,4,nmTokens,nm1); Assert.assertEquals(1,nmTokens.size()); MockNM nm2=rm.registerNode("h2:1234",10000); nm2.nodeHeartbeat(true); ArrayList containersReceivedForNM2=new ArrayList(); response=am.allocate("h2",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,2,nmTokens,nm2); Assert.assertEquals(2,nmTokens.size()); nm2=rm.registerNode("h2:1234",10000); Map nodes=rm.getRMContext().getRMNodes(); while (nodes.get(nm2.getNodeId()).getLastNodeHeartBeatResponse().getResponseId() > 0) { Thread.sleep(WAIT_SLEEP_MS); } int interval=40; while (nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId()) && interval-- > 0) { LOG.info("waiting for nmToken to be cleared for : " + nm2.getNodeId()); Thread.sleep(WAIT_SLEEP_MS); } 
Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); nmTokens.remove(nm2.getNodeId().toString()); Assert.assertEquals(1,nmTokens.size()); response=am.allocate("h2",1000,2,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,4,nmTokens,nm2); Assert.assertEquals(2,nmTokens.size()); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId())); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId())); nmTokenSecretManager.rollMasterKey(); nmTokenSecretManager.activateNextMasterKey(); Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm1.getNodeId())); Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId())); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); nmTokens.clear(); Assert.assertEquals(0,nmTokens.size()); response=am.allocate("h2",1000,1,releaseContainerList); Assert.assertEquals(0,response.getAllocatedContainers().size()); allocateContainersAndValidateNMTokens(am,containersReceivedForNM2,5,nmTokens,nm2); Assert.assertEquals(1,nmTokens.size()); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptNMTokenPresent(attempt.getAppAttemptId(),nm2.getNodeId())); Assert.assertTrue(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); am.unregisterAppAttempt(); for ( Container container : containersReceivedForNM1) { nm1.nodeHeartbeat(attempt.getAppAttemptId(),container.getId().getId(),ContainerState.COMPLETE); } for ( Container container : containersReceivedForNM2) { nm2.nodeHeartbeat(attempt.getAppAttemptId(),container.getId().getId(),ContainerState.COMPLETE); } 
nm1.nodeHeartbeat(am.getApplicationAttemptId(),1,ContainerState.COMPLETE); am.waitForState(RMAppAttemptState.FINISHED); Assert.assertFalse(nmTokenSecretManager.isApplicationAttemptRegistered(attempt.getAppAttemptId())); } finally { rm.stop(); } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test public void testAdminRefreshQueuesWithLocalConfigurationProvider() throws IOException, YarnException { rm=new MockRM(configuration); rm.init(configuration); rm.start(); CapacityScheduler cs=(CapacityScheduler)rm.getRMContext().getScheduler(); int maxAppsBefore=cs.getConfiguration().getMaximumSystemApplications(); try { rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance()); Assert.assertEquals(maxAppsBefore,cs.getConfiguration().getMaximumSystemApplications()); } catch ( Exception ex) { fail("Using localConfigurationProvider. Should not get any exception."); } }

    APIUtilityVerifier BranchVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * RM startup with the FileSystemBasedConfigurationProvider must pick up
     * every uploaded configuration file on init: the excludeHosts file
     * ("0.0.0.0:123" appears in the NodesListManager's excluded hosts), the
     * yarn-site.xml admin ACL ("world:anyone:rwcda"), the
     * capacity-scheduler.xml max applications (5000), the hadoop-policy.xml
     * service ACLs (verified on the admin, client-RM, AM and resource-tracker
     * RPC servers), the core-site.xml proxy-user groups/hosts, and the
     * refreshed user-to-groups mapping (test_group_D/E/F after
     * MockUnixGroupsMapping.updateGroups()). The RM is stopped in finally.
     * NOTE(review): several assertEquals calls here pass (actual, expected)
     * rather than the JUnit (expected, actual) convention.
     */
    @Test public void testRMInitialsWithFileSystemBasedConfigurationProvider() throws Exception { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); final File excludeHostsFile=new File(tmpDir.toString(),"excludeHosts"); if (excludeHostsFile.exists()) { excludeHostsFile.delete(); } if (!excludeHostsFile.createNewFile()) { Assert.fail("Can not create " + "excludeHosts"); } PrintWriter fileWriter=new PrintWriter(excludeHostsFile); fileWriter.write("0.0.0.0:123"); fileWriter.close(); uploadToRemoteFileSystem(new Path(excludeHostsFile.getAbsolutePath())); YarnConfiguration yarnConf=new YarnConfiguration(); yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL,"world:anyone:rwcda"); yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,this.workingPath + "/excludeHosts"); uploadConfiguration(yarnConf,"yarn-site.xml"); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); csConf.set("yarn.scheduler.capacity.maximum-applications","5000"); uploadConfiguration(csConf,"capacity-scheduler.xml"); String aclsString="alice,bob users,wheel"; Configuration newConf=new Configuration(); newConf.set("security.applicationclient.protocol.acl",aclsString); uploadConfiguration(newConf,"hadoop-policy.xml"); Configuration conf=new Configuration(); conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,true); conf.set("hadoop.proxyuser.test.groups","test_groups"); conf.set("hadoop.proxyuser.test.hosts","test_hosts"); conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,MockUnixGroupsMapping.class,GroupMappingServiceProvider.class); uploadConfiguration(conf,"core-site.xml"); MockUnixGroupsMapping.updateGroups(); ResourceManager resourceManager=null; try { try { resourceManager=new ResourceManager(); resourceManager.init(configuration); resourceManager.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } Set 
excludeHosts=resourceManager.getRMContext().getNodesListManager().getHostsReader().getExcludedHosts(); Assert.assertTrue(excludeHosts.size() == 1); Assert.assertTrue(excludeHosts.contains("0.0.0.0:123")); String aclStringAfter=resourceManager.adminService.getAccessControlList().getAclString().trim(); Assert.assertEquals(aclStringAfter,"world:anyone:rwcda"); CapacityScheduler cs=(CapacityScheduler)resourceManager.getRMContext().getScheduler(); int maxAppsAfter=cs.getConfiguration().getMaximumSystemApplications(); Assert.assertEquals(maxAppsAfter,5000); ServiceAuthorizationManager adminServiceServiceManager=resourceManager.adminService.getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(adminServiceServiceManager,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); ServiceAuthorizationManager clientRMServiceServiceManager=resourceManager.getRMContext().getClientRMService().getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(clientRMServiceServiceManager,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); ServiceAuthorizationManager appMasterService=resourceManager.getRMContext().getApplicationMasterService().getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(appMasterService,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); ServiceAuthorizationManager RTService=resourceManager.getRMContext().getResourceTrackerService().getServer().getServiceAuthorizationManager(); verifyServiceACLsRefresh(RTService,org.apache.hadoop.yarn.api.ApplicationClientProtocolPB.class,aclsString); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").size() == 1); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get("hadoop.proxyuser.test.groups").contains("test_groups")); 
Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").size() == 1); Assert.assertTrue(ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get("hadoop.proxyuser.test.hosts").contains("test_hosts")); List groupAfter=Groups.getUserToGroupsMappingService(configuration).getGroups(UserGroupInformation.getCurrentUser().getUserName()); Assert.assertTrue(groupAfter.contains("test_group_D") && groupAfter.contains("test_group_E") && groupAfter.contains("test_group_F")&& groupAfter.size() == 3); } finally { if (resourceManager != null) { resourceManager.stop(); } } }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * RM HA with the file-system based configuration provider: both RMs
     * start STANDBY; rm1 becomes ACTIVE and picks up a refreshed
     * capacity-scheduler.xml (maximum-applications 5000) via refreshQueues,
     * while standby rm2 still reports the default (10000). After failing
     * over (rm1 to standby, rm2 to active) rm2 must also see 5000.
     */
    @Test
    public void testRMHAWithFileSystemBasedConfiguration() throws IOException, YarnException {
        StateChangeRequestInfo requestInfo = new StateChangeRequestInfo(
            HAServiceProtocol.RequestSource.REQUEST_BY_USER);
        configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
            "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
        configuration.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
        configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
        configuration.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
        int base = 100;
        for (String confKey : YarnConfiguration.getServiceAddressConfKeys(configuration)) {
            // Give each service address a distinct port per RM id.
            configuration.set(HAUtil.addSuffix(confKey, "rm1"), "0.0.0.0:" + (base + 20));
            configuration.set(HAUtil.addSuffix(confKey, "rm2"), "0.0.0.0:" + (base + 40));
            base = base * 2;
        }
        Configuration conf1 = new Configuration(configuration);
        conf1.set(YarnConfiguration.RM_HA_ID, "rm1");
        Configuration conf2 = new Configuration(configuration);
        conf2.set(YarnConfiguration.RM_HA_ID, "rm2");
        uploadDefaultConfiguration();
        MockRM rm1 = null;
        MockRM rm2 = null;
        try {
            rm1 = new MockRM(conf1);
            rm1.init(conf1);
            rm1.start();
            Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
            rm2 = new MockRM(conf2);
            // Fix: rm2 was initialized with conf1 (rm1's HA id), so both RMs
            // believed their RM_HA_ID was "rm1". Initialize with conf2.
            rm2.init(conf2);
            rm2.start();
            Assert.assertTrue(rm2.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
            rm1.adminService.transitionToActive(requestInfo);
            Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.ACTIVE);
            CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
            csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
            uploadConfiguration(csConf, "capacity-scheduler.xml");
            rm1.adminService.refreshQueues(RefreshQueuesRequest.newInstance());
            int maxApps = ((CapacityScheduler) rm1.getRMContext().getScheduler())
                .getConfiguration().getMaximumSystemApplications();
            // assertEquals(expected, actual) — arguments were swapped originally.
            Assert.assertEquals(5000, maxApps);
            // The standby RM must still see the default until it goes active.
            int maxAppsBeforeFailOver = ((CapacityScheduler) rm2.getRMContext().getScheduler())
                .getConfiguration().getMaximumSystemApplications();
            Assert.assertEquals(10000, maxAppsBeforeFailOver);
            rm1.adminService.transitionToStandby(requestInfo);
            rm2.adminService.transitionToActive(requestInfo);
            Assert.assertTrue(rm1.getRMContext().getHAServiceState() == HAServiceState.STANDBY);
            Assert.assertTrue(rm2.getRMContext().getHAServiceState() == HAServiceState.ACTIVE);
            int maxAppsAfter = ((CapacityScheduler) rm2.getRMContext().getScheduler())
                .getConfiguration().getMaximumSystemApplications();
            Assert.assertEquals(5000, maxAppsAfter);
        } finally {
            if (rm1 != null) {
                rm1.stop();
            }
            if (rm2 != null) {
                rm2.stop();
            }
        }
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    @Test public void testAdminAclsWithFileSystemBasedConfigurationProvider() throws IOException, YarnException { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); uploadDefaultConfiguration(); try { rm=new MockRM(configuration); rm.init(configuration); rm.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } String aclStringBefore=rm.adminService.getAccessControlList().getAclString().trim(); YarnConfiguration yarnConf=new YarnConfiguration(); yarnConf.set(YarnConfiguration.YARN_ADMIN_ACL,"world:anyone:rwcda"); uploadConfiguration(yarnConf,"yarn-site.xml"); rm.adminService.refreshAdminAcls(RefreshAdminAclsRequest.newInstance()); String aclStringAfter=rm.adminService.getAccessControlList().getAclString().trim(); Assert.assertTrue(!aclStringAfter.equals(aclStringBefore)); Assert.assertEquals(aclStringAfter,"world:anyone:rwcda"); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testAdminRefreshQueuesWithFileSystemBasedConfigurationProvider() throws IOException, YarnException { configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,"org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider"); uploadDefaultConfiguration(); try { rm=new MockRM(configuration); rm.init(configuration); rm.start(); } catch ( Exception ex) { fail("Should not get any exceptions"); } CapacityScheduler cs=(CapacityScheduler)rm.getRMContext().getScheduler(); int maxAppsBefore=cs.getConfiguration().getMaximumSystemApplications(); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); csConf.set("yarn.scheduler.capacity.maximum-applications","5000"); uploadConfiguration(csConf,"capacity-scheduler.xml"); rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance()); int maxAppsAfter=cs.getConfiguration().getMaximumSystemApplications(); Assert.assertEquals(maxAppsAfter,5000); Assert.assertTrue(maxAppsAfter != maxAppsBefore); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMAuditLogger

    EqualityVerifier 
    /** * Test the AuditLog format with key-val pair. */ @Test public void testKeyValLogFormat() throws Exception { StringBuilder actLog=new StringBuilder(); StringBuilder expLog=new StringBuilder(); RMAuditLogger.start(Keys.USER,USER,actLog); expLog.append("USER=test"); assertEquals(expLog.toString(),actLog.toString()); RMAuditLogger.add(Keys.OPERATION,OPERATION,actLog); expLog.append("\tOPERATION=oper"); assertEquals(expLog.toString(),actLog.toString()); RMAuditLogger.add(Keys.APPID,(String)null,actLog); expLog.append("\tAPPID=null"); assertEquals(expLog.toString(),actLog.toString()); RMAuditLogger.add(Keys.TARGET,TARGET,actLog); expLog.append("\tTARGET=tgt"); assertEquals(expLog.toString(),actLog.toString()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMHA

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that repeated standby/active transitions do not leak state:
     * the dispatcher's registered-handler count and the RM's service count
     * stay constant across transitions, and the (counting) dispatcher is
     * stopped exactly when the RM goes to standby, not while active.
     * Uses MyCountingDispatcher, injected via the overridden createDispatcher().
     */
    @Test public void testRMDispatcherForHA() throws IOException { String errorMessageForEventHandler="Expect to get the same number of handlers"; String errorMessageForService="Expect to get the same number of services"; configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); Configuration conf=new YarnConfiguration(configuration); rm=new MockRM(conf){ @Override protected Dispatcher createDispatcher(){ return new MyCountingDispatcher(); } } ; rm.init(conf); int expectedEventHandlerCount=((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount(); int expectedServiceCount=rm.getServices().size(); assertTrue(expectedEventHandlerCount != 0); StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER); assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState()); assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive()); rm.start(); rm.adminService.transitionToStandby(requestInfo); rm.adminService.transitionToActive(requestInfo); rm.adminService.transitionToStandby(requestInfo); rm.adminService.transitionToActive(requestInfo); rm.adminService.transitionToStandby(requestInfo); MyCountingDispatcher dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher(); assertTrue(!dispatcher.isStopped()); rm.adminService.transitionToActive(requestInfo); assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount()); assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size()); dispatcher=(MyCountingDispatcher)rm.getRMContext().getDispatcher(); rm.adminService.transitionToStandby(requestInfo); assertEquals(errorMessageForEventHandler,expectedEventHandlerCount,((MyCountingDispatcher)rm.getRMContext().getDispatcher()).getEventHandlerCount()); 
assertEquals(errorMessageForService,expectedServiceCount,rm.getServices().size()); assertTrue(dispatcher.isStopped()); rm.stop(); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): the verifyClusterMetrics(...) expectations below depend on the
    // exact transition order — each transitionToActive restarts active services and
    // re-registers the node, which is why the submitted/started counts grow.
    // checkMonitorHealth()/check*RMFunctionality()/verifyClusterMetrics() are
    // helpers defined elsewhere in this class.
    /** * Test to verify the following RM HA transitions to the following states. * 1. Standby: Should be a no-op * 2. Active: Active services should start * 3. Active: Should be a no-op. * While active, submit a couple of jobs * 4. Standby: Active services should stop * 5. Active: Active services should start * 6. Stop the RM: All services should stop and RM should not be ready to * become Active */ @Test(timeout=30000) public void testFailoverAndTransitions() throws Exception { configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); Configuration conf=new YarnConfiguration(configuration); rm=new MockRM(conf); rm.init(conf); StateChangeRequestInfo requestInfo=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER); assertEquals(STATE_ERR,HAServiceState.INITIALIZING,rm.adminService.getServiceStatus().getState()); assertFalse("RM is ready to become active before being started",rm.adminService.getServiceStatus().isReadyToBecomeActive()); checkMonitorHealth(); rm.start(); checkMonitorHealth(); checkStandbyRMFunctionality(); verifyClusterMetrics(0,0,0,0,0,0); rm.adminService.transitionToStandby(requestInfo); checkMonitorHealth(); checkStandbyRMFunctionality(); verifyClusterMetrics(0,0,0,0,0,0); rm.adminService.transitionToActive(requestInfo); checkMonitorHealth(); checkActiveRMFunctionality(); verifyClusterMetrics(1,1,1,1,2048,1); rm.adminService.transitionToActive(requestInfo); checkMonitorHealth(); checkActiveRMFunctionality(); verifyClusterMetrics(1,2,2,2,2048,2); rm.adminService.transitionToStandby(requestInfo); checkMonitorHealth(); checkStandbyRMFunctionality(); verifyClusterMetrics(0,0,0,0,0,0); rm.adminService.transitionToActive(requestInfo); checkMonitorHealth(); checkActiveRMFunctionality(); verifyClusterMetrics(1,1,1,1,2048,1); rm.stop(); assertEquals(STATE_ERR,HAServiceState.STOPPING,rm.adminService.getServiceStatus().getState()); assertFalse("RM is ready to become active even after it is 
stopped",rm.adminService.getServiceStatus().isReadyToBecomeActive()); assertFalse("Active RM services are started",rm.areActiveServicesRunning()); checkMonitorHealth(); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testHAIDLookup(){ Configuration conf=new YarnConfiguration(configuration); rm=new MockRM(conf); rm.init(conf); assertEquals(conf.get(YarnConfiguration.RM_HA_ID),RM2_NODE_ID); configuration.set(YarnConfiguration.RM_HA_ID,RM1_NODE_ID); conf=new YarnConfiguration(configuration); rm=new MockRM(conf); rm.init(conf); assertEquals(conf.get(YarnConfiguration.RM_HA_ID),RM1_NODE_ID); configuration.set(YarnConfiguration.RM_HA_IDS,RM1_NODE_ID + "," + RM3_NODE_ID); configuration.unset(YarnConfiguration.RM_HA_ID); conf=new YarnConfiguration(configuration); try { rm=new MockRM(conf); rm.init(conf); fail("Should get an exception here."); } catch ( Exception ex) { Assert.assertTrue(ex.getMessage().contains("Invalid configuration! Can not find valid RM_HA_ID.")); } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMNodeTransitions

    InternalCallVerifier EqualityVerifier 
    @Test public void testRunningExpire(){ RMNodeImpl node=getRunningNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.EXPIRE)); Assert.assertEquals("Active Nodes",initialActive - 1,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost + 1,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.LOST,node.getState()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testUnhealthyRebooting(){ RMNodeImpl node=getUnhealthyNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.REBOOTING)); Assert.assertEquals("Active Nodes",initialActive,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy - 1,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted + 1,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.REBOOTED,node.getState()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testUnhealthyDecommission(){ RMNodeImpl node=getUnhealthyNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.DECOMMISSION)); Assert.assertEquals("Active Nodes",initialActive,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy - 1,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned + 1,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.DECOMMISSIONED,node.getState()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testReconnnectUpdate(){ final String nmVersion1="nm version 1"; final String nmVersion2="nm version 2"; RMNodeImpl node=getRunningNode(nmVersion1); Assert.assertEquals(nmVersion1,node.getNodeManagerVersion()); RMNodeImpl reconnectingNode=getRunningNode(nmVersion2); node.handle(new RMNodeReconnectEvent(node.getNodeID(),reconnectingNode,null)); Assert.assertEquals(nmVersion2,node.getNodeManagerVersion()); }

    EqualityVerifier 
    /**
     * Verifies completed-container propagation from two nodes: node reports
     * one completed container immediately; node2 has its first status event
     * held back (setNextHeartBeat(false)) so that both of its completed
     * containers surface together on the second status event. Asserts the
     * contents and order of the shared 'completedContainers' list populated
     * by the test's mocked dispatcher.
     */
    @Test(timeout=5000) public void testContainerUpdate() throws InterruptedException { node.handle(new RMNodeStartedEvent(null,null,null)); NodeId nodeId=BuilderUtils.newNodeId("localhost:1",1); RMNodeImpl node2=new RMNodeImpl(nodeId,rmContext,null,0,0,null,null,null); node2.handle(new RMNodeStartedEvent(null,null,null)); ContainerId completedContainerIdFromNode1=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0); ContainerId completedContainerIdFromNode2_1=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(1,1),1),1); ContainerId completedContainerIdFromNode2_2=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(1,1),1),2); RMNodeStatusEvent statusEventFromNode1=getMockRMNodeStatusEvent(); RMNodeStatusEvent statusEventFromNode2_1=getMockRMNodeStatusEvent(); RMNodeStatusEvent statusEventFromNode2_2=getMockRMNodeStatusEvent(); ContainerStatus containerStatusFromNode1=mock(ContainerStatus.class); ContainerStatus containerStatusFromNode2_1=mock(ContainerStatus.class); ContainerStatus containerStatusFromNode2_2=mock(ContainerStatus.class); doReturn(completedContainerIdFromNode1).when(containerStatusFromNode1).getContainerId(); doReturn(Collections.singletonList(containerStatusFromNode1)).when(statusEventFromNode1).getContainers(); node.handle(statusEventFromNode1); Assert.assertEquals(1,completedContainers.size()); Assert.assertEquals(completedContainerIdFromNode1,completedContainers.get(0).getContainerId()); completedContainers.clear(); doReturn(completedContainerIdFromNode2_1).when(containerStatusFromNode2_1).getContainerId(); doReturn(Collections.singletonList(containerStatusFromNode2_1)).when(statusEventFromNode2_1).getContainers(); doReturn(completedContainerIdFromNode2_2).when(containerStatusFromNode2_2).getContainerId(); 
doReturn(Collections.singletonList(containerStatusFromNode2_2)).when(statusEventFromNode2_2).getContainers(); node2.setNextHeartBeat(false); node2.handle(statusEventFromNode2_1); node2.setNextHeartBeat(true); node2.handle(statusEventFromNode2_2); Assert.assertEquals(2,completedContainers.size()); Assert.assertEquals(completedContainerIdFromNode2_1,completedContainers.get(0).getContainerId()); Assert.assertEquals(completedContainerIdFromNode2_2,completedContainers.get(1).getContainerId()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testUnhealthyExpire(){ RMNodeImpl node=getUnhealthyNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.EXPIRE)); Assert.assertEquals("Active Nodes",initialActive,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost + 1,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy - 1,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.LOST,node.getState()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testRunningRebooting(){ RMNodeImpl node=getRunningNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.REBOOTING)); Assert.assertEquals("Active Nodes",initialActive - 1,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted + 1,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.REBOOTED,node.getState()); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=20000) public void testUpdateHeartbeatResponseForCleanup(){ RMNodeImpl node=getRunningNode(); NodeId nodeId=node.getNodeID(); ContainerId completedContainerId=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0); node.handle(new RMNodeCleanContainerEvent(nodeId,completedContainerId)); Assert.assertEquals(1,node.getContainersToCleanUp().size()); ApplicationId finishedAppId=BuilderUtils.newApplicationId(0,1); node.handle(new RMNodeCleanAppEvent(nodeId,finishedAppId)); Assert.assertEquals(1,node.getAppsToCleanup().size()); RMNodeStatusEvent statusEvent=getMockRMNodeStatusEvent(); node.handle(statusEvent); Assert.assertEquals(1,node.getContainersToCleanUp().size()); Assert.assertEquals(1,node.getAppsToCleanup().size()); NodeHeartbeatResponse hbrsp=Records.newRecord(NodeHeartbeatResponse.class); node.updateNodeHeartbeatResponseForCleanup(hbrsp); Assert.assertEquals(0,node.getContainersToCleanUp().size()); Assert.assertEquals(0,node.getAppsToCleanup().size()); Assert.assertEquals(1,hbrsp.getContainersToCleanup().size()); Assert.assertEquals(completedContainerId,hbrsp.getContainersToCleanup().get(0)); Assert.assertEquals(1,hbrsp.getApplicationsToCleanup().size()); Assert.assertEquals(finishedAppId,hbrsp.getApplicationsToCleanup().get(0)); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testUnhealthyExpireForSchedulerRemove(){ RMNodeImpl node=getUnhealthyNode(); verify(scheduler,times(2)).handle(any(NodeRemovedSchedulerEvent.class)); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.EXPIRE)); verify(scheduler,times(2)).handle(any(NodeRemovedSchedulerEvent.class)); Assert.assertEquals(NodeState.LOST,node.getState()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testAdd(){ RMNodeImpl node=getNewNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeStartedEvent(node.getNodeID(),null,null)); Assert.assertEquals("Active Nodes",initialActive + 1,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.RUNNING,node.getState()); Assert.assertNotNull(nodesListManagerEvent); Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,nodesListManagerEvent.getType()); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=5000) public void testStatusChange(){ node.handle(new RMNodeStartedEvent(null,null,null)); node.setNextHeartBeat(false); ContainerId completedContainerId1=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0); ContainerId completedContainerId2=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(1,1),1),1); RMNodeStatusEvent statusEvent1=getMockRMNodeStatusEvent(); RMNodeStatusEvent statusEvent2=getMockRMNodeStatusEvent(); ContainerStatus containerStatus1=mock(ContainerStatus.class); ContainerStatus containerStatus2=mock(ContainerStatus.class); doReturn(completedContainerId1).when(containerStatus1).getContainerId(); doReturn(Collections.singletonList(containerStatus1)).when(statusEvent1).getContainers(); doReturn(completedContainerId2).when(containerStatus2).getContainerId(); doReturn(Collections.singletonList(containerStatus2)).when(statusEvent2).getContainers(); verify(scheduler,times(1)).handle(any(NodeUpdateSchedulerEvent.class)); node.handle(statusEvent1); node.handle(statusEvent2); verify(scheduler,times(1)).handle(any(NodeUpdateSchedulerEvent.class)); Assert.assertEquals(2,node.getQueueSize()); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.EXPIRE)); Assert.assertEquals(0,node.getQueueSize()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testRunningDecommission(){ RMNodeImpl node=getRunningNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(),RMNodeEventType.DECOMMISSION)); Assert.assertEquals("Active Nodes",initialActive - 1,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned + 1,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.DECOMMISSIONED,node.getState()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testReconnect(){ RMNodeImpl node=getRunningNode(); ClusterMetrics cm=ClusterMetrics.getMetrics(); int initialActive=cm.getNumActiveNMs(); int initialLost=cm.getNumLostNMs(); int initialUnhealthy=cm.getUnhealthyNMs(); int initialDecommissioned=cm.getNumDecommisionedNMs(); int initialRebooted=cm.getNumRebootedNMs(); node.handle(new RMNodeReconnectEvent(node.getNodeID(),node,null)); Assert.assertEquals("Active Nodes",initialActive,cm.getNumActiveNMs()); Assert.assertEquals("Lost Nodes",initialLost,cm.getNumLostNMs()); Assert.assertEquals("Unhealthy Nodes",initialUnhealthy,cm.getUnhealthyNMs()); Assert.assertEquals("Decommissioned Nodes",initialDecommissioned,cm.getNumDecommisionedNMs()); Assert.assertEquals("Rebooted Nodes",initialRebooted,cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.RUNNING,node.getState()); Assert.assertNotNull(nodesListManagerEvent); Assert.assertEquals(NodesListManagerEventType.NODE_USABLE,nodesListManagerEvent.getType()); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=5000) public void testExpiredContainer(){ node.handle(new RMNodeStartedEvent(null,null,null)); verify(scheduler).handle(any(NodeAddedSchedulerEvent.class)); ContainerId completedContainerId=BuilderUtils.newContainerId(BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(0,0),0),0); node.handle(new RMNodeCleanContainerEvent(null,completedContainerId)); Assert.assertEquals(1,node.getContainersToCleanUp().size()); RMNodeStatusEvent statusEvent=getMockRMNodeStatusEvent(); ContainerStatus containerStatus=mock(ContainerStatus.class); doReturn(completedContainerId).when(containerStatus).getContainerId(); doReturn(Collections.singletonList(containerStatus)).when(statusEvent).getContainers(); node.handle(statusEvent); verify(scheduler,times(2)).handle(any(NodeUpdateSchedulerEvent.class)); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=60000) public void testRMRestartAppRunningAMFailed() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200); MockAM am0=launchAM(app0,rm1,nm1); nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE); am0.waitForState(RMAppAttemptState.FAILED); ApplicationState appState=rmAppState.get(app0.getApplicationId()); Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState()); Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState()); rm1.waitForState(app0.getApplicationId(),RMAppState.ACCEPTED); MockRM rm2=new MockRM(conf,memStore); rm2.start(); rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED); rm1.stop(); rm2.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    @SuppressWarnings("rawtypes") @Test(timeout=180000) public void testRMRestart() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); MockNM nm2=new MockNM("127.0.0.2:5678",15120,rm1.getResourceTrackerService()); nm1.registerNode(); nm2.registerNode(); RMApp app0=rm1.submitApp(200); RMAppAttempt attempt0=app0.getCurrentAppAttempt(); Assert.assertEquals(1,rmAppState.size()); nm1.nodeHeartbeat(true); MockAM am0=rm1.sendAMLaunched(attempt0.getAppAttemptId()); am0.registerAppAttempt(); finishApplicationMaster(app0,rm1,nm1,am0); RMApp app1=rm1.submitApp(200); ApplicationState appState=rmAppState.get(app1.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(0,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId()); nm1.nodeHeartbeat(true); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); ApplicationAttemptId attemptId1=attempt1.getAppAttemptId(); rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED); Assert.assertEquals(1,appState.getAttemptCount()); ApplicationAttemptState attemptState=appState.getAttempt(attemptId1); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId()); MockAM am1=rm1.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); am1.allocate("127.0.0.1",1000,1,new ArrayList()); nm1.nodeHeartbeat(true); List conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); while (conts.size() == 0) { nm1.nodeHeartbeat(true); conts.addAll(am1.allocate(new 
ArrayList(),new ArrayList()).getAllocatedContainers()); Thread.sleep(500); } RMApp app2=rm1.submitApp(200); appState=rmAppState.get(app2.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(0,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app2.getApplicationSubmissionContext().getApplicationId()); RMApp appUnmanaged=rm1.submitApp(200,"someApp","someUser",null,true,null,conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null); ApplicationAttemptId unmanagedAttemptId=appUnmanaged.getCurrentAppAttempt().getAppAttemptId(); ApplicationId unmanagedAppId=appUnmanaged.getApplicationId(); appState=rmAppState.get(unmanagedAppId); Assert.assertNotNull(appState); rm1.waitForState(unmanagedAttemptId,RMAppAttemptState.LAUNCHED); rm1.waitForState(unmanagedAppId,RMAppState.ACCEPTED); Assert.assertEquals(1,appState.getAttemptCount()); Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),appUnmanaged.getApplicationSubmissionContext().getApplicationId()); MockRM rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); nm2.setResourceTrackerService(rm2.getResourceTrackerService()); Assert.assertEquals(4,rm2.getRMContext().getRMApps().size()); rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED); rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FINISHED); RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); Assert.assertNotNull(loadedApp1); Assert.assertEquals(1,loadedApp1.getAppAttempts().size()); Assert.assertEquals(app1.getApplicationSubmissionContext().getApplicationId(),loadedApp1.getApplicationSubmissionContext().getApplicationId()); RMApp loadedApp2=rm2.getRMContext().getRMApps().get(app2.getApplicationId()); Assert.assertNotNull(loadedApp2); 
Assert.assertEquals(app2.getApplicationSubmissionContext().getApplicationId(),loadedApp2.getApplicationSubmissionContext().getApplicationId()); rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED); rm2.waitForState(loadedApp2.getApplicationId(),RMAppState.ACCEPTED); Assert.assertEquals(1,loadedApp1.getAppAttempts().size()); Assert.assertEquals(1,loadedApp2.getAppAttempts().size()); am1.setAMRMProtocol(rm2.getApplicationMasterService(),rm2.getRMContext()); AllocateResponse allocResponse=am1.allocate(new ArrayList(),new ArrayList()); Assert.assertEquals(AMCommand.AM_SHUTDOWN,allocResponse.getAMCommand()); NodeHeartbeatResponse hbResponse=nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); hbResponse=nm2.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC,hbResponse.getNodeAction()); nm1=new MockNM("127.0.0.1:1234",15120,rm2.getResourceTrackerService()); nm2=new MockNM("127.0.0.2:5678",15120,rm2.getResourceTrackerService()); NMContainerStatus status=TestRMRestart.createNMContainerStatus(loadedApp1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE); nm1.registerNode(Arrays.asList(status),null); nm2.registerNode(); rm2.waitForState(loadedApp1.getApplicationId(),RMAppState.ACCEPTED); int timeoutSecs=0; while (loadedApp1.getAppAttempts().size() != 2 && timeoutSecs++ < 40) { ; Thread.sleep(200); } hbResponse=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction()); hbResponse=nm2.nodeHeartbeat(true); Assert.assertTrue(NodeAction.RESYNC != hbResponse.getNodeAction()); attempt1=loadedApp1.getCurrentAppAttempt(); attemptId1=attempt1.getAppAttemptId(); rm2.waitForState(attemptId1,RMAppAttemptState.ALLOCATED); appState=rmAppState.get(loadedApp1.getApplicationId()); attemptState=appState.getAttempt(attemptId1); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId()); MockNM 
am1Node=nm1; if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) { am1Node=nm2; } RMAppAttempt attempt2=loadedApp2.getCurrentAppAttempt(); ApplicationAttemptId attemptId2=attempt2.getAppAttemptId(); rm2.waitForState(attemptId2,RMAppAttemptState.ALLOCATED); appState=rmAppState.get(loadedApp2.getApplicationId()); attemptState=appState.getAttempt(attemptId2); Assert.assertNotNull(attemptState); Assert.assertEquals(BuilderUtils.newContainerId(attemptId2,1),attemptState.getMasterContainer().getId()); MockNM am2Node=nm1; if (attemptState.getMasterContainer().getNodeId().toString().contains("127.0.0.2")) { am2Node=nm2; } am1=rm2.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); MockAM am2=rm2.sendAMLaunched(attempt2.getAppAttemptId()); am2.registerAppAttempt(); am1.allocate("127.0.0.1",1000,3,new ArrayList()); am2.allocate("127.0.0.2",1000,1,new ArrayList()); nm1.nodeHeartbeat(true); nm2.nodeHeartbeat(true); conts=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); while (conts.size() == 0) { nm1.nodeHeartbeat(true); nm2.nodeHeartbeat(true); conts.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); Thread.sleep(500); } finishApplicationMaster(loadedApp1,rm2,am1Node,am1); finishApplicationMaster(loadedApp2,rm2,am2Node,am2); rm2.stop(); rm1.stop(); Assert.assertEquals(4,rmAppState.size()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=60000) public void testRMRestartFailedApp() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app0=rm1.submitApp(200); MockAM am0=launchAM(app0,rm1,nm1); nm1.nodeHeartbeat(am0.getApplicationAttemptId(),1,ContainerState.COMPLETE); am0.waitForState(RMAppAttemptState.FAILED); rm1.waitForState(app0.getApplicationId(),RMAppState.FAILED); ApplicationState appState=rmAppState.get(app0.getApplicationId()); Assert.assertEquals(RMAppState.FAILED,appState.getState()); Assert.assertEquals(RMAppAttemptState.FAILED,appState.getAttempt(am0.getApplicationAttemptId()).getState()); MockRM rm2=new MockRM(conf,memStore); rm2.start(); RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId()); rm2.waitForState(app0.getApplicationId(),RMAppState.FAILED); rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.FAILED); Assert.assertEquals(1,loadedApp0.getAppAttempts().size()); verifyAppReportAfterRMRestart(app0,rm2); Assert.assertTrue(app0.getDiagnostics().toString().contains("Failing the application.")); rm1.stop(); rm2.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that pending state-store events are fully drained before the RM
     * stops: applications submitted while the store's dispatcher is blocked
     * must all be persisted by the time {@code stop()} returns.
     */
    @Test(timeout=60000) public void testRMStateStoreDispatcherDrainedOnRMStop() throws Exception {
      // Store that blocks every store event until serviceStop() is invoked,
      // simulating a slow state store with a backlog at shutdown time.
      MemoryRMStateStore memStore=new MemoryRMStateStore(){
        volatile boolean wait=true;
        @Override public void serviceStop() throws Exception {
          wait=false;
          super.serviceStop();
        }
        @Override protected void handleStoreEvent( RMStateStoreEvent event){
          // Busy-wait until stop is requested, then process the event normally.
          while (wait) ;
          super.handleStoreEvent(event);
        }
      }
      ;
      memStore.init(conf);
      final MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      final ArrayList appList=new ArrayList();
      final int NUM_APPS=5;
      for (int i=0; i < NUM_APPS; i++) {
        // Last argument false: submission returns while the save is queued.
        RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false);
        appList.add(app);
        rm1.waitForState(app.getApplicationId(),RMAppState.NEW_SAVING);
      }
      // Nothing can have been stored yet: the store's dispatcher is blocked.
      Map rmAppState=memStore.getState().getApplicationState();
      Assert.assertEquals(0,rmAppState.size());
      // stop() unblocks the store and must drain all queued events first.
      rm1.stop();
      for ( RMApp app : appList) {
        ApplicationState appState=rmAppState.get(app.getApplicationId());
        Assert.assertNotNull(appState);
        Assert.assertEquals(0,appState.getAttemptCount());
        Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app.getApplicationSubmissionContext().getApplicationId());
      }
      // Every submitted app must have made it into the store.
      Assert.assertEquals(NUM_APPS,rmAppState.size());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that an application finishing SUCCEEDED has its final status,
     * diagnostics, tracking URL and finish time persisted, and that a restarted
     * RM reports the same recovered values to clients.
     */
    @Test(timeout=60000) public void testRMRestartSucceededApp() throws Exception {
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      // First RM: run one application to successful completion.
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      RMApp app0=rm1.submitApp(200);
      MockAM am0=launchAM(app0,rm1,nm1);
      // Unregister with explicit final status, diagnostics and tracking URL so
      // each field can be checked to round-trip through the state store.
      FinishApplicationMasterRequest req=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.SUCCEEDED,"diagnostics","trackingUrl");
      finishApplicationMaster(app0,rm1,nm1,am0,req);
      // All finish-time fields must have been persisted on the attempt/app.
      ApplicationState appState=rmAppState.get(app0.getApplicationId());
      ApplicationAttemptState attemptState0=appState.getAttempt(am0.getApplicationAttemptId());
      Assert.assertEquals("diagnostics",attemptState0.getDiagnostics());
      Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,attemptState0.getFinalApplicationStatus());
      Assert.assertEquals("trackingUrl",attemptState0.getFinalTrackingUrl());
      Assert.assertEquals(app0.getFinishTime(),appState.getFinishTime());
      // Restarted RM must surface the recovered values in the app report.
      MockRM rm2=new MockRM(conf,memStore);
      rm2.start();
      ApplicationReport appReport=verifyAppReportAfterRMRestart(app0,rm2);
      Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,appReport.getFinalApplicationStatus());
      Assert.assertEquals("trackingUrl",appReport.getOriginalTrackingUrl());
      rm1.stop();
      rm2.stop();
    }

    EqualityVerifier 
    /**
     * Verifies the decommissioned-NM metric across an RM restart: the count is
     * driven by the exclude-hosts file, and a freshly started RM reading the
     * same file reports the same count as the RM that did the decommissioning.
     */
    @Test(timeout=60000) public void testDecomissionedNMsMetricsOnRMRestart() throws Exception {
      YarnConfiguration yarnConf=new YarnConfiguration();
      yarnConf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,hostFile.getAbsolutePath());
      // Begin with an empty exclude list and two registered nodes.
      writeToHostsFile("");
      MockRM firstRM=new MockRM(yarnConf);
      firstRM.start();
      firstRM.registerNode("localhost:1234",8000);
      firstRM.registerNode("host2:1234",8000);
      Assert.assertEquals(0,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
      // Exclude both hosts and refresh the node list: both nodes count as
      // decommissioned.
      String localhostAddress=NetUtils.normalizeHostName("localhost");
      writeToHostsFile("host2",localhostAddress);
      firstRM.getNodesListManager().refreshNodes(yarnConf);
      Assert.assertEquals(2,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
      // A second RM started against the same exclude file must report the same
      // decommissioned count.
      MockRM restartedRM=new MockRM(yarnConf);
      restartedRM.start();
      Assert.assertEquals(2,ClusterMetrics.getMetrics().getNumDecommisionedNMs());
      firstRM.stop();
      restartedRM.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=60000) public void testDelegationTokenRestoredInDelegationTokenRenewer() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos"); UserGroupInformation.setConfiguration(conf); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Map rmAppState=rmState.getApplicationState(); MockRM rm1=new TestSecurityMockRM(conf,memStore); rm1.start(); HashSet> tokenSet=new HashSet>(); Credentials ts=new Credentials(); Text userText1=new Text("user1"); RMDelegationTokenIdentifier dtId1=new RMDelegationTokenIdentifier(userText1,new Text("renewer1"),userText1); Token token1=new Token(dtId1,rm1.getRMContext().getRMDelegationTokenSecretManager()); SecurityUtil.setTokenService(token1,rmAddr); ts.addToken(userText1,token1); tokenSet.add(token1); Text userText2=new Text("user2"); RMDelegationTokenIdentifier dtId2=new RMDelegationTokenIdentifier(userText2,new Text("renewer2"),userText2); Token token2=new Token(dtId2,rm1.getRMContext().getRMDelegationTokenSecretManager()); SecurityUtil.setTokenService(token2,rmAddr); ts.addToken(userText2,token2); tokenSet.add(token2); RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts); ApplicationState appState=rmAppState.get(app.getApplicationId()); Assert.assertNotNull(appState); Assert.assertEquals(tokenSet,rm1.getRMContext().getDelegationTokenRenewer().getDelegationTokens()); DataOutputBuffer dob=new DataOutputBuffer(); ts.writeTokenStorageToStream(dob); ByteBuffer securityTokens=ByteBuffer.wrap(dob.getData(),0,dob.getLength()); securityTokens.rewind(); Assert.assertEquals(securityTokens,appState.getApplicationSubmissionContext().getAMContainerSpec().getTokens()); MockRM rm2=new TestSecurityMockRM(conf,memStore); rm2.start(); waitForTokensToBeRenewed(rm2); Assert.assertEquals(tokenSet,rm2.getRMContext().getDelegationTokenRenewer().getDelegationTokens()); 
rm1.stop(); rm2.stop(); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that completed applications beyond the max-completed-apps limit
     * (set to 1 here) are evicted from both the RM context and the state store
     * after a restart, once a newer application finishes.
     */
    @Test(timeout=60000) public void testFinishedAppRemovalAfterRMRestart() throws Exception {
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      // Keep at most one completed application around.
      conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS,1);
      memStore.init(conf);
      RMState rmState=memStore.getState();
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      // Run one application to completion on the first RM.
      RMApp app0=rm1.submitApp(200);
      MockAM am0=launchAM(app0,rm1,nm1);
      finishApplicationMaster(app0,rm1,nm1,am0);
      // Restart and re-register the node with the new RM.
      MockRM rm2=new MockRM(conf,memStore);
      rm2.start();
      nm1.setResourceTrackerService(rm2.getResourceTrackerService());
      nm1=rm2.registerNode("127.0.0.1:1234",15120);
      // app0 is recovered as FINISHED and still present in the store.
      Map rmAppState=rmState.getApplicationState();
      Assert.assertEquals(RMAppState.FINISHED,rmAppState.get(app0.getApplicationId()).getState());
      rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
      // Finishing a second application pushes app0 past the limit of one
      // retained completed application...
      RMApp app1=rm2.submitApp(200);
      MockAM am1=launchAM(app1,rm2,nm1);
      finishApplicationMaster(app1,rm2,nm1,am1);
      // ...so app0 must be removed from the RM context and the state store.
      Assert.assertNull(rm2.getRMContext().getRMApps().get(app0.getApplicationId()));
      Assert.assertNull(rmAppState.get(app0.getApplicationId()));
      rm1.stop();
      rm2.stop();
    }

    APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that a restarted RM can serve client queries over recovered
     * applications: filtering by final application state (FINISHED / FAILED /
     * KILLED) and by application type, and that the summary of each recovered
     * completed application is logged exactly once.
     */
    @Test(timeout=60000) public void testRMRestartGetApplicationList() throws Exception {
      // One attempt per application so a single AM failure fails the app.
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      // app0 finishes successfully.
      RMApp app0=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
      MockAM am0=launchAM(app0,rm1,nm1);
      finishApplicationMaster(app0,rm1,nm1,am0);
      // app1 fails: AM container completes without unregistering.
      RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
      MockAM am1=launchAM(app1,rm1,nm1);
      nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
      am1.waitForState(RMAppAttemptState.FAILED);
      rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED);
      // app2 is killed by the client.
      RMApp app2=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
      MockAM am2=launchAM(app2,rm1,nm1);
      rm1.killApp(app2.getApplicationId());
      rm1.waitForState(app2.getApplicationId(),RMAppState.KILLED);
      rm1.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.KILLED);
      // Restart with a spied RMAppManager so logApplicationSummary calls can
      // be verified after recovery.
      MockRM rm2=new MockRM(conf,memStore){
        @Override protected RMAppManager createRMAppManager(){
          return spy(super.createRMAppManager());
        }
      }
      ;
      rm2.start();
      // Query by final states: each app must appear with the state it ended
      // in before the restart.
      GetApplicationsRequest request1=GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.FINISHED,YarnApplicationState.KILLED,YarnApplicationState.FAILED));
      GetApplicationsResponse response1=rm2.getClientRMService().getApplications(request1);
      List appList1=response1.getApplicationList();
      boolean forApp0=false, forApp1=false, forApp2=false;
      for ( ApplicationReport report : appList1) {
        if (report.getApplicationId().equals(app0.getApplicationId())) {
          Assert.assertEquals(YarnApplicationState.FINISHED,report.getYarnApplicationState());
          forApp0=true;
        }
        if (report.getApplicationId().equals(app1.getApplicationId())) {
          Assert.assertEquals(YarnApplicationState.FAILED,report.getYarnApplicationState());
          forApp1=true;
        }
        if (report.getApplicationId().equals(app2.getApplicationId())) {
          Assert.assertEquals(YarnApplicationState.KILLED,report.getYarnApplicationState());
          forApp2=true;
        }
      }
      Assert.assertTrue(forApp0 && forApp1 && forApp2);
      // Query by application type must return all three recovered apps.
      Set appTypes=new HashSet();
      appTypes.add("myType");
      GetApplicationsRequest request2=GetApplicationsRequest.newInstance(appTypes);
      GetApplicationsResponse response2=rm2.getClientRMService().getApplications(request2);
      List appList2=response2.getApplicationList();
      // assertEquals gives an expected-vs-actual message on failure, unlike
      // the previous assertTrue(3 == size).
      Assert.assertEquals(3,appList2.size());
      // Each recovered completed application has its summary logged once.
      verify(rm2.getRMAppManager(),times(3)).logApplicationSummary(isA(ApplicationId.class));
      rm1.stop();
      rm2.stop();
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies recovery behaviour around max AM attempts: an app submitted with
     * maxAppAttempts=1 whose single attempt cannot be resumed is failed after
     * the restart, while an app using the default (-1 resolves to the
     * configured maximum) is re-accepted.
     */
    @Test(timeout=60000) public void testRMRestartOnMaxAppAttempts() throws Exception {
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      // app1: exactly one AM attempt allowed; app2: default (-1) max attempts.
      RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,null);
      RMApp app2=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null);
      // The submission is persisted before any attempt exists.
      ApplicationState appState=rmAppState.get(app1.getApplicationId());
      Assert.assertNotNull(appState);
      Assert.assertEquals(0,appState.getAttemptCount());
      Assert.assertEquals(appState.getApplicationSubmissionContext().getApplicationId(),app1.getApplicationSubmissionContext().getApplicationId());
      // Allocate app1's AM container; the attempt is then persisted too.
      nm1.nodeHeartbeat(true);
      RMAppAttempt attempt=app1.getCurrentAppAttempt();
      ApplicationAttemptId attemptId1=attempt.getAppAttemptId();
      rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
      Assert.assertEquals(1,appState.getAttemptCount());
      ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
      Assert.assertNotNull(attemptState);
      Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
      // Short AM expiry so app1's recovered-but-never-relaunched attempt
      // expires quickly after the restart.
      conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,3000);
      MockRM rm2=new MockRM(conf,memStore);
      rm2.start();
      // app2's -1 is resolved to 2 (the configured maximum) on recovery.
      Assert.assertEquals(2,rm2.getRMContext().getRMApps().get(app2.getApplicationId()).getMaxAppAttempts());
      Assert.assertEquals(2,rm2.getRMContext().getRMApps().size());
      // app1 exhausts its single attempt and fails; app2 is accepted again.
      rm2.waitForState(app1.getApplicationId(),RMAppState.FAILED);
      rm2.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
      Assert.assertEquals(RMAppState.FAILED,rmAppState.get(app1.getApplicationId()).getState());
      // app2 has no final state recorded in the store (it is still live).
      Assert.assertNull(rmAppState.get(app2.getApplicationId()).getState());
      rm1.stop();
      rm2.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises repeated RM restarts (rm1..rm4) while earlier AM attempts are
     * still around: each restarted RM must wait for the previous attempt to be
     * reported finished before a new one runs, and the recovered attempt
     * counts and states must stay consistent across all restarts.
     */
    @Test(timeout=60000) public void testRMRestartWaitForPreviousAMToFinish() throws Exception {
      // Large attempt budget so attempt failures never exhaust the app.
      YarnConfiguration conf=new YarnConfiguration(this.conf);
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,40);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      // rm1: fail attempt 1, then launch attempt 2 and leave it RUNNING.
      final MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",16382,rm1.getResourceTrackerService());
      nm1.registerNode();
      RMApp app1=rm1.submitApp(200);
      rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
      MockAM am1=launchAM(app1,rm1,nm1);
      nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
      am1.waitForState(RMAppAttemptState.FAILED);
      MockAM am2=launchAM(app1,rm1,nm1);
      Assert.assertEquals(1,rmAppState.size());
      Assert.assertEquals(app1.getState(),RMAppState.RUNNING);
      Assert.assertEquals(app1.getAppAttempts().get(app1.getCurrentAppAttempt().getAppAttemptId()).getAppAttemptState(),RMAppAttemptState.RUNNING);
      // rm2: recover while am2 is still unreported; the node is told to RESYNC
      // and the app goes back to ACCEPTED with both attempts recovered.
      MockRM rm2=null;
      rm2=new MockRM(conf,memStore);
      rm2.start();
      nm1.setResourceTrackerService(rm2.getResourceTrackerService());
      NodeHeartbeatResponse res=nm1.nodeHeartbeat(true);
      Assert.assertEquals(NodeAction.RESYNC,res.getNodeAction());
      RMApp rmApp=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
      rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
      Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
      Assert.assertEquals(2,rmApp.getAppAttempts().size());
      rm2.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
      rm2.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.LAUNCHED);
      Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am1.getApplicationAttemptId()).getAppAttemptState());
      Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts().get(am2.getApplicationAttemptId()).getAppAttemptState());
      // Report am2's container COMPLETE on re-registration: rm2 fails am2 and
      // a third attempt can then be launched and reach RUNNING.
      NMContainerStatus status=TestRMRestart.createNMContainerStatus(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
      nm1.registerNode(Arrays.asList(status),null);
      rm2.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.FAILED);
      launchAM(rmApp,rm2,nm1);
      Assert.assertEquals(3,rmApp.getAppAttempts().size());
      rm2.waitForState(rmApp.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.RUNNING);
      // rm3: recover again with a longer AM expiry; attempt 3 is recovered
      // LAUNCHED, then expires to FAILED, producing a fourth attempt.
      conf.setInt(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,10000);
      MockRM rm3=null;
      rm3=new MockRM(conf,memStore);
      rm3.start();
      nm1.setResourceTrackerService(rm3.getResourceTrackerService());
      rmApp=rm3.getRMContext().getRMApps().get(app1.getApplicationId());
      rm3.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
      Assert.assertEquals(rmApp.getState(),RMAppState.ACCEPTED);
      Assert.assertEquals(3,rmApp.getAppAttempts().size());
      rm3.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.FAILED);
      rm3.waitForState(am2.getApplicationAttemptId(),RMAppAttemptState.FAILED);
      ApplicationAttemptId latestAppAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
      rm3.waitForState(latestAppAttemptId,RMAppAttemptState.LAUNCHED);
      Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am1.getApplicationAttemptId()).getAppAttemptState());
      Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(am2.getApplicationAttemptId()).getAppAttemptState());
      Assert.assertEquals(RMAppAttemptState.LAUNCHED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
      rm3.waitForState(latestAppAttemptId,RMAppAttemptState.FAILED);
      rm3.waitForState(rmApp.getApplicationId(),RMAppState.ACCEPTED);
      Assert.assertEquals(4,rmApp.getAppAttempts().size());
      Assert.assertEquals(RMAppAttemptState.FAILED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
      latestAppAttemptId=rmApp.getCurrentAppAttempt().getAppAttemptId();
      // A second application submitted on rm3; no attempt persisted yet.
      RMApp app2=rm3.submitApp(200);
      rm3.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
      Assert.assertEquals(1,app2.getAppAttempts().size());
      Assert.assertEquals(0,memStore.getState().getApplicationState().get(app2.getApplicationId()).getAttemptCount());
      // rm4: both applications recovered; app1 keeps its 4 attempts, app2 its 1.
      MockRM rm4=null;
      rm4=new MockRM(conf,memStore);
      rm4.start();
      rmApp=rm4.getRMContext().getRMApps().get(app1.getApplicationId());
      rm4.waitForState(rmApp.getApplicationId(),RMAppState.ACCEPTED);
      int timeoutSecs=0;
      // NOTE(review): the loop condition compares against 2 while the assert
      // below expects 4 recovered attempts — this looks like it should be
      // "!= 4"; as written the loop may spin its full 40 * 200ms budget before
      // the assertion runs. Confirm against upstream history.
      while (rmApp.getAppAttempts().size() != 2 && timeoutSecs++ < 40) {
        Thread.sleep(200);
      }
      Assert.assertEquals(4,rmApp.getAppAttempts().size());
      Assert.assertEquals(RMAppState.ACCEPTED,rmApp.getState());
      rm4.waitForState(latestAppAttemptId,RMAppAttemptState.SCHEDULED);
      Assert.assertEquals(RMAppAttemptState.SCHEDULED,rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
      app2=rm4.getRMContext().getRMApps().get(app2.getApplicationId());
      rm4.waitForState(app2.getApplicationId(),RMAppState.ACCEPTED);
      Assert.assertEquals(RMAppState.ACCEPTED,app2.getState());
      Assert.assertEquals(1,app2.getAppAttempts().size());
      rm4.waitForState(app2.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.SCHEDULED);
      Assert.assertEquals(RMAppAttemptState.SCHEDULED,app2.getCurrentAppAttempt().getAppAttemptState());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that per-attempt security material (client-token master key and
     * AMRM token) is persisted with the attempt state and restored into the
     * secret managers of a restarted RM.
     */
    @Test(timeout=60000) public void testAppAttemptTokensRestoredOnRMRestart() throws Exception {
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
      // Client tokens are only produced under kerberos authentication.
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
      UserGroupInformation.setConfiguration(conf);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      MockRM rm1=new TestSecurityMockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("0.0.0.0:4321",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),"default");
      ApplicationState appState=rmAppState.get(app1.getApplicationId());
      Assert.assertNotNull(appState);
      // Drive the first attempt to ALLOCATED so its state (including its
      // credentials) is written to the store.
      nm1.nodeHeartbeat(true);
      RMAppAttempt attempt1=app1.getCurrentAppAttempt();
      ApplicationAttemptId attemptId1=attempt1.getAppAttemptId();
      rm1.waitForState(attemptId1,RMAppAttemptState.ALLOCATED);
      ApplicationAttemptState attemptState=appState.getAttempt(attemptId1);
      Assert.assertNotNull(attemptState);
      Assert.assertEquals(BuilderUtils.newContainerId(attemptId1,1),attemptState.getMasterContainer().getId());
      // The client-token master key must be part of the saved credentials.
      byte[] clientTokenMasterKey=attempt1.getClientTokenMasterKey().getEncoded();
      Credentials savedCredentials=attemptState.getAppAttemptCredentials();
      Assert.assertArrayEquals("client token master key not saved",clientTokenMasterKey,savedCredentials.getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME));
      // Restart: the keys must be reloaded into the new RM's secret managers.
      MockRM rm2=new TestSecurityMockRM(conf,memStore);
      rm2.start();
      RMApp loadedApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId());
      RMAppAttempt loadedAttempt1=loadedApp1.getRMAppAttempt(attemptId1);
      Assert.assertNotNull(loadedAttempt1);
      Assert.assertEquals("client token master key not restored",attempt1.getClientTokenMasterKey(),loadedAttempt1.getClientTokenMasterKey());
      Assert.assertArrayEquals(clientTokenMasterKey,rm2.getClientToAMTokenSecretManager().getMasterKey(attemptId1).getEncoded());
      // The recovered AMRM token must validate against the restarted RM's
      // AMRMTokenSecretManager.
      Token amrmToken=loadedAttempt1.getAMRMToken();
      Assert.assertArrayEquals(amrmToken.getPassword(),rm2.getRMContext().getAMRMTokenSecretManager().retrievePassword(amrmToken.decodeIdentifier()));
      rm1.stop();
      rm2.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that a client can retry killApp until the kill is confirmed:
     * the kill is re-issued until getIsKillCompleted() returns true, and the
     * number of store updates observed by the instrumented state store is
     * checked afterwards.
     */
    @Test(timeout=60000) public void testClientRetryOnKillingApplication() throws Exception {
      // NOTE(review): TestMemoryRMStateStore is defined elsewhere in this file;
      // the assertions below rely on it making the first kill non-immediate
      // and counting update calls — confirm against its definition.
      MemoryRMStateStore memStore=new TestMemoryRMStateStore();
      memStore.init(conf);
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      RMApp app1=rm1.submitApp(200,"name","user",null,false,"default",1,null,"myType");
      MockAM am1=launchAM(app1,rm1,nm1);
      KillApplicationResponse response;
      int count=0;
      // Re-issue the kill until the RM confirms completion; count the retries.
      while (true) {
        response=rm1.killApp(app1.getApplicationId());
        if (response.getIsKillCompleted()) {
          break;
        }
        Thread.sleep(100);
        count++;
      }
      // At least one retry must have been needed before the kill completed.
      Assert.assertTrue(count >= 1);
      rm1.waitForState(am1.getApplicationAttemptId(),RMAppAttemptState.KILLED);
      rm1.waitForState(app1.getApplicationId(),RMAppState.KILLED);
      // Expected store traffic for the kill: one attempt update, two app
      // updates (per the counters exposed by TestMemoryRMStateStore).
      Assert.assertEquals(1,((TestMemoryRMStateStore)memStore).updateAttempt);
      Assert.assertEquals(2,((TestMemoryRMStateStore)memStore).updateApp);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that a KILLED application is recovered as KILLED after an RM
     * restart, with its single attempt and its diagnostics intact in the
     * client-visible application report.
     */
    @Test(timeout=60000) public void testRMRestartKilledApp() throws Exception {
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=new MockNM("127.0.0.1:1234",15120,rm1.getResourceTrackerService());
      nm1.registerNode();
      RMApp app0=rm1.submitApp(200);
      MockAM am0=launchAM(app0,rm1,nm1);
      // Kill the running app; wait until app and attempt are both KILLED.
      rm1.killApp(app0.getApplicationId());
      rm1.waitForState(app0.getApplicationId(),RMAppState.KILLED);
      rm1.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.KILLED);
      // KILLED final states must be persisted to the store.
      ApplicationState appState=rmAppState.get(app0.getApplicationId());
      Assert.assertEquals(RMAppState.KILLED,appState.getState());
      Assert.assertEquals(RMAppAttemptState.KILLED,appState.getAttempt(am0.getApplicationAttemptId()).getState());
      // Restarted RM recovers the app as KILLED with one attempt and the
      // original diagnostics in the app report.
      MockRM rm2=new MockRM(conf,memStore);
      rm2.start();
      RMApp loadedApp0=rm2.getRMContext().getRMApps().get(app0.getApplicationId());
      rm2.waitForState(app0.getApplicationId(),RMAppState.KILLED);
      rm2.waitForState(am0.getApplicationAttemptId(),RMAppAttemptState.KILLED);
      Assert.assertEquals(1,loadedApp0.getAppAttempts().size());
      ApplicationReport appReport=verifyAppReportAfterRMRestart(app0,rm2);
      Assert.assertEquals(app0.getDiagnostics().toString(),appReport.getDiagnostics());
      rm1.stop();
      rm2.stop();
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that when the save of an app's final state is lost before a
     * restart (simulated by dropping the first updateApplicationStateInternal
     * call), the restarted RM finishes the FINISHING attempt and persists the
     * FINISHED state itself.
     */
    @Test(timeout=60000) public void testRMRestartWaitForPreviousSucceededAttempt() throws Exception {
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
      // Store that silently drops the FIRST application-state update, so the
      // final state is missing from the store when rm1 goes down.
      MemoryRMStateStore memStore=new MemoryRMStateStore(){
        int count=0;
        @Override public void updateApplicationStateInternal( ApplicationId appId, ApplicationStateData appStateData) throws Exception {
          if (count == 0) {
            LOG.info(appId + " final state is not saved.");
            count++;
          }
          else {
            super.updateApplicationStateInternal(appId,appStateData);
          }
        }
      }
      ;
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      MockRM rm1=new MockRM(conf,memStore);
      rm1.start();
      MockNM nm1=rm1.registerNode("127.0.0.1:1234",15120);
      RMApp app0=rm1.submitApp(200);
      MockAM am0=MockRM.launchAndRegisterAM(app0,rm1,nm1);
      // The AM unregisters as SUCCEEDED: the attempt reaches FINISHING, but
      // the dropped store update leaves no final state behind.
      FinishApplicationMasterRequest req=FinishApplicationMasterRequest.newInstance(FinalApplicationStatus.SUCCEEDED,"","");
      am0.unregisterAppAttempt(req,true);
      am0.waitForState(RMAppAttemptState.FINISHING);
      Assert.assertNull(rmAppState.get(app0.getApplicationId()).getState());
      // After restart, rm2 must drive the recovered attempt to FINISHED and
      // write the FINISHED final state to the store.
      MockRM rm2=new MockRM(conf,memStore);
      nm1.setResourceTrackerService(rm2.getResourceTrackerService());
      rm2.start();
      rm2.waitForState(app0.getCurrentAppAttempt().getAppAttemptId(),RMAppAttemptState.FINISHED);
      rm2.waitForState(app0.getApplicationId(),RMAppState.FINISHED);
      Assert.assertEquals(RMAppState.FINISHED,rmAppState.get(app0.getApplicationId()).getState());
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that RM delegation-token state (tokens, master keys, sequence
     * number) is persisted as tokens are issued/cancelled, and that a
     * restarted RM recovers all of it and keeps the store in sync through
     * subsequent renew and cancel operations.
     */
    @Test(timeout=60000) public void testRMDelegationTokenRestoredOnRMRestart() throws Exception {
      conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2);
      // Delegation tokens are only issued under kerberos authentication.
      conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,"kerberos");
      conf.set(YarnConfiguration.RM_ADDRESS,"localhost:8032");
      UserGroupInformation.setConfiguration(conf);
      MemoryRMStateStore memStore=new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState=memStore.getState();
      Map rmAppState=rmState.getApplicationState();
      // Live views of the store's delegation-token and master-key state.
      Map rmDTState=rmState.getRMDTSecretManagerState().getTokenState();
      Set rmDTMasterKeyState=rmState.getRMDTSecretManagerState().getMasterKeyState();
      MockRM rm1=new TestSecurityMockRM(conf,memStore);
      rm1.start();
      // Obtain a delegation token for renewer1 and attach it to an app.
      Credentials ts=new Credentials();
      GetDelegationTokenRequest request1=GetDelegationTokenRequest.newInstance("renewer1");
      UserGroupInformation.getCurrentUser().setAuthenticationMethod(AuthMethod.KERBEROS);
      GetDelegationTokenResponse response1=rm1.getClientRMService().getDelegationToken(request1);
      org.apache.hadoop.yarn.api.records.Token delegationToken1=response1.getRMDelegationToken();
      Token token1=ConverterUtils.convertFromYarn(delegationToken1,rmAddr);
      RMDelegationTokenIdentifier dtId1=token1.decodeIdentifier();
      HashSet tokenIdentSet=new HashSet();
      ts.addToken(token1.getService(),token1);
      tokenIdentSet.add(dtId1);
      RMApp app=rm1.submitApp(200,"name","user",new HashMap(),false,"default",1,ts);
      ApplicationState appState=rmAppState.get(app.getApplicationId());
      Assert.assertNotNull(appState);
      // The secret manager's keys, tokens and sequence number must match what
      // was persisted to the store.
      Set allKeysRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys();
      Assert.assertEquals(allKeysRM1,rmDTMasterKeyState);
      Map allTokensRM1=rm1.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
      Assert.assertEquals(tokenIdentSet,allTokensRM1.keySet());
      Assert.assertEquals(allTokensRM1,rmDTState);
      Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rmState.getRMDTSecretManagerState().getDTSequenceNumber());
      // Issue a second token and cancel it: the sequence number still advances
      // but the cancelled token must be removed from the store.
      GetDelegationTokenRequest request2=GetDelegationTokenRequest.newInstance("renewer2");
      GetDelegationTokenResponse response2=rm1.getClientRMService().getDelegationToken(request2);
      org.apache.hadoop.yarn.api.records.Token delegationToken2=response2.getRMDelegationToken();
      Token token2=ConverterUtils.convertFromYarn(delegationToken2,rmAddr);
      RMDelegationTokenIdentifier dtId2=token2.decodeIdentifier();
      try {
        rm1.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token2,UserGroupInformation.getCurrentUser().getUserName());
      }
      catch ( Exception e) {
        Assert.fail();
      }
      Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),dtId2.getSequenceNumber());
      Assert.assertFalse(rmDTState.containsKey(dtId2));
      // Restart: tokens, master keys and sequence number must be recovered.
      MockRM rm2=new TestSecurityMockRM(conf,memStore);
      rm2.start();
      Map allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
      Assert.assertEquals(allTokensRM2.keySet(),allTokensRM1.keySet());
      Assert.assertTrue(rm2.getRMContext().getRMDelegationTokenSecretManager().getAllMasterKeys().containsAll(allKeysRM1));
      Assert.assertEquals(rm1.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber(),rm2.getRMContext().getRMDelegationTokenSecretManager().getLatestDTSequenceNumber());
      // Renew on the restarted RM: the new expiry must replace the old one in
      // the store. sleep(1) guarantees a strictly later renew date.
      Long renewDateBeforeRenew=allTokensRM2.get(dtId1);
      try {
        Thread.sleep(1);
        rm2.getRMContext().getRMDelegationTokenSecretManager().renewToken(token1,"renewer1");
      }
      catch ( Exception e) {
        Assert.fail();
      }
      allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
      Long renewDateAfterRenew=allTokensRM2.get(dtId1);
      Assert.assertTrue(renewDateAfterRenew > renewDateBeforeRenew);
      Assert.assertTrue(rmDTState.containsValue(renewDateAfterRenew));
      Assert.assertFalse(rmDTState.containsValue(renewDateBeforeRenew));
      // Cancel on the restarted RM: the token must disappear from both the
      // secret manager and the store.
      try {
        rm2.getRMContext().getRMDelegationTokenSecretManager().cancelToken(token1,UserGroupInformation.getCurrentUser().getUserName());
      }
      catch ( Exception e) {
        Assert.fail();
      }
      allTokensRM2=rm2.getRMContext().getRMDelegationTokenSecretManager().getAllTokens();
      Assert.assertFalse(allTokensRM2.containsKey(dtId1));
      Assert.assertFalse(rmDTState.containsKey(dtId1));
      rm1.stop();
      rm2.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestResourceManager

    APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that the RM forces RMAuthenticationFilterInitializer into the
     * http filter-initializer configuration: kerberos-type configurations are
     * rewritten around it (dropping the plain AuthenticationFilterInitializer),
     * and simple/static configurations get it prepended.
     * NOTE(review): the assertions run inside catch(RuntimeException) blocks —
     * if startWepApp() does not throw in this environment, they are skipped.
     */
    @Test(timeout=50000) public void testFilterOverrides() throws Exception {
      String filterInitializerConfKey="hadoop.http.filter.initializers";
      // Variants of the initializer list: each filter alone, combined with and
      // without whitespace, and combined with an unrelated class (this test).
      String[] filterInitializers={AuthenticationFilterInitializer.class.getName(),RMAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + "," + RMAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + ", " + RMAuthenticationFilterInitializer.class.getName(),AuthenticationFilterInitializer.class.getName() + ", " + this.getClass().getName()};
      for ( String filterInitializer : filterInitializers) {
        resourceManager=new ResourceManager();
        Configuration conf=new YarnConfiguration();
        conf.set(filterInitializerConfKey,filterInitializer);
        conf.set("hadoop.security.authentication","kerberos");
        conf.set("hadoop.http.authentication.type","kerberos");
        try {
          // Applying a kerberos config without real credentials may throw.
          try {
            UserGroupInformation.setConfiguration(conf);
          }
          catch ( Exception e) {
            LOG.info("Got expected exception");
          }
          resourceManager.init(conf);
          resourceManager.startWepApp();
        }
        catch ( RuntimeException e) {
          // startWepApp failed; inspect the rewritten initializer config.
          String tmp=resourceManager.getConfig().get(filterInitializerConfKey);
          if (filterInitializer.contains(this.getClass().getName())) {
            // Unrelated initializers are kept, after the RM auth filter.
            Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName() + "," + this.getClass().getName(),tmp);
          }
          else {
            Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName(),tmp);
          }
          resourceManager.stop();
        }
      }
      // Non-kerberos configs: the RM auth filter is prepended and a
      // StaticUserWebFilter entry is preserved.
      String[] simpleFilterInitializers={"",StaticUserWebFilter.class.getName()};
      for ( String filterInitializer : simpleFilterInitializers) {
        resourceManager=new ResourceManager();
        Configuration conf=new YarnConfiguration();
        conf.set(filterInitializerConfKey,filterInitializer);
        try {
          UserGroupInformation.setConfiguration(conf);
          resourceManager.init(conf);
          resourceManager.startWepApp();
        }
        catch ( RuntimeException e) {
          String tmp=resourceManager.getConfig().get(filterInitializerConfKey);
          if (filterInitializer.equals(StaticUserWebFilter.class.getName())) {
            Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName() + "," + StaticUserWebFilter.class.getName(),tmp);
          }
          else {
            Assert.assertEquals(RMAuthenticationFilterInitializer.class.getName(),tmp);
          }
          resourceManager.stop();
        }
      }
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestResourceTrackerService

    InternalCallVerifier EqualityVerifier 
    /**
     * A NodeManager on a host absent from the configured include file must be
     * told to SHUTDOWN at registration time, with an explanatory diagnostic
     * message.
     */
    @Test public void testNodeRegistrationFailure() throws Exception {
      // Only "host1" is allowed to register.
      writeToHostsFile("host1");
      Configuration config=new Configuration();
      config.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
      rm=new MockRM(config);
      rm.start();
      ResourceTrackerService tracker=rm.getResourceTrackerService();
      // Try to register a node on the disallowed host "host2".
      RegisterNodeManagerRequest registration=Records.newRecord(RegisterNodeManagerRequest.class);
      registration.setNodeId(NodeId.newInstance("host2",1234));
      registration.setHttpPort(1234);
      RegisterNodeManagerResponse reply=tracker.registerNodeManager(registration);
      // Registration is refused with SHUTDOWN and a diagnostic message.
      Assert.assertEquals(NodeAction.SHUTDOWN,reply.getNodeAction());
      Assert.assertEquals("Disallowed NodeManager from host2, Sending SHUTDOWN signal to the NodeManager.",reply.getDiagnosticsMessage());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Decommissioning using a post-configured include hosts file: a node left
     * out of an include file that is added after both nodes registered must be
     * shut down on refresh, while the included node keeps heartbeating
     * normally.
     */
    @Test public void testAddNewIncludePathToConfiguration() throws Exception {
      Configuration config=new Configuration();
      rm=new MockRM(config);
      rm.start();
      // Register two nodes before any include file is configured.
      MockNM includedNode=rm.registerNode("host1:1234",5120);
      MockNM excludedNode=rm.registerNode("host2:5678",10240);
      ClusterMetrics clusterMetrics=ClusterMetrics.getMetrics();
      assert (clusterMetrics != null);
      int decommissionedBefore=clusterMetrics.getNumDecommisionedNMs();
      // Both nodes heartbeat normally at this point.
      NodeHeartbeatResponse heartbeatResponse=includedNode.nodeHeartbeat(true);
      Assert.assertEquals(NodeAction.NORMAL,heartbeatResponse.getNodeAction());
      heartbeatResponse=excludedNode.nodeHeartbeat(true);
      Assert.assertEquals(NodeAction.NORMAL,heartbeatResponse.getNodeAction());
      // Now restrict the cluster to host1 via a freshly-configured include
      // file and refresh the node list.
      writeToHostsFile("host1");
      config.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath());
      rm.getNodesListManager().refreshNodes(config);
      heartbeatResponse=includedNode.nodeHeartbeat(true);
      Assert.assertEquals("Node should not have been decomissioned.",NodeAction.NORMAL,heartbeatResponse.getNodeAction());
      heartbeatResponse=excludedNode.nodeHeartbeat(true);
      Assert.assertEquals("Node should have been decomissioned but is in state" + heartbeatResponse.getNodeAction(),NodeAction.SHUTDOWN,heartbeatResponse.getNodeAction());
      checkDecommissionedNMCount(rm,++decommissionedBefore);
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testReboot() throws Exception { Configuration conf=new Configuration(); rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:1234",2048); int initialMetricCount=ClusterMetrics.getMetrics().getNumRebootedNMs(); NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat=nm2.nodeHeartbeat(new HashMap>(),true,-100); Assert.assertTrue(NodeAction.RESYNC.equals(nodeHeartbeat.getNodeAction())); Assert.assertEquals("Too far behind rm response id:0 nm response id:-100",nodeHeartbeat.getDiagnosticsMessage()); checkRebootedNMCount(rm,++initialMetricCount); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With scheduler minimum allocations configured (2048 MB, 4 vcores), a
     * NodeManager registering with less than the minimum in EITHER dimension
     * (1024/1, 2048/1, 1024/4) must be answered with SHUTDOWN; only a node
     * meeting both minimums (2048/4) is admitted with NORMAL.
     */
    @Test public void testNodeRegistrationWithMinimumAllocations() throws Exception { Configuration conf=new Configuration(); conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,"2048"); conf.set(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,"4"); rm=new MockRM(conf); rm.start(); ResourceTrackerService resourceTrackerService=rm.getResourceTrackerService(); RegisterNodeManagerRequest req=Records.newRecord(RegisterNodeManagerRequest.class); NodeId nodeId=BuilderUtils.newNodeId("host",1234); req.setNodeId(nodeId); Resource capability=BuilderUtils.newResource(1024,1); req.setResource(capability); RegisterNodeManagerResponse response1=resourceTrackerService.registerNodeManager(req); Assert.assertEquals(NodeAction.SHUTDOWN,response1.getNodeAction()); capability.setMemory(2048); capability.setVirtualCores(1); req.setResource(capability); RegisterNodeManagerResponse response2=resourceTrackerService.registerNodeManager(req); Assert.assertEquals(NodeAction.SHUTDOWN,response2.getNodeAction()); capability.setMemory(1024); capability.setVirtualCores(4); req.setResource(capability); RegisterNodeManagerResponse response3=resourceTrackerService.registerNodeManager(req); Assert.assertEquals(NodeAction.SHUTDOWN,response3.getNodeAction()); capability.setMemory(2048); capability.setVirtualCores(4); req.setResource(capability); RegisterNodeManagerResponse response4=resourceTrackerService.registerNodeManager(req); Assert.assertEquals(NodeAction.NORMAL,response4.getNodeAction()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Decommissioning with a pre-configured include-hosts file. Three nodes
     * (host1, host2, localhost) start in the include list and heartbeat
     * NORMAL. The file is then rewritten to contain only host1 and the
     * resolved IP of localhost, and the node list refreshed: host1 and
     * localhost must remain NORMAL (localhost verifies that IP-based entries
     * match), host2 must be SHUTDOWN, and the decommissioned-NM metric must
     * rise by exactly one and then stay there.
     */
    @Test public void testDecommissionWithIncludeHosts() throws Exception { writeToHostsFile("localhost","host1","host2"); Configuration conf=new Configuration(); conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath()); rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:5678",10240); MockNM nm3=rm.registerNode("localhost:4433",1024); ClusterMetrics metrics=ClusterMetrics.getMetrics(); assert (metrics != null); int metricCount=metrics.getNumDecommisionedNMs(); NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat=nm2.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); nodeHeartbeat=nm3.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); String ip=NetUtils.normalizeHostName("localhost"); writeToHostsFile("host1",ip); rm.getNodesListManager().refreshNodes(conf); nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); Assert.assertEquals(0,ClusterMetrics.getMetrics().getNumDecommisionedNMs()); nodeHeartbeat=nm2.nodeHeartbeat(true); Assert.assertTrue("Node is not decommisioned.",NodeAction.SHUTDOWN.equals(nodeHeartbeat.getNodeAction())); checkDecommissionedNMCount(rm,++metricCount); nodeHeartbeat=nm3.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(nodeHeartbeat.getNodeAction())); Assert.assertEquals(metricCount,ClusterMetrics.getMetrics().getNumDecommisionedNMs()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * The RM must read the NM heartbeat interval from configuration
     * ({@code RM_NM_HEARTBEAT_INTERVAL_MS}) and hand exactly that value back
     * to every NodeManager in its heartbeat response.
     */
    @Test(timeout=50000)
    public void testGetNextHeartBeatInterval() throws Exception {
      Configuration conf = new Configuration();
      conf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "4000");
      rm = new MockRM(conf);
      rm.start();

      MockNM firstNode = rm.registerNode("host1:1234", 5120);
      MockNM secondNode = rm.registerNode("host2:5678", 10240);

      // Both nodes must be told to come back in exactly 4000 ms.
      Assert.assertEquals(4000, firstNode.nodeHeartbeat(true).getNextHeartBeatInterval());
      Assert.assertEquals(4000, secondNode.nodeHeartbeat(true).getNextHeartBeatInterval());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A NodeManager's registration response must carry the RM identifier,
     * which is the ResourceManager's cluster timestamp.
     */
    @Test
    public void testSetRMIdentifierInRegistration() throws Exception {
      rm = new MockRM(new Configuration());
      rm.start();

      MockNM node = new MockNM("host1:1234", 5120, rm.getResourceTrackerService());
      RegisterNodeManagerResponse registration = node.registerNode();

      Assert.assertEquals(
          ResourceManager.getClusterTimeStamp(), registration.getRMIdentifier());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Decommissioning via an exclude-hosts file that is configured only AFTER
     * both nodes have registered and heartbeated normally. Mirror image of the
     * include-path test: once the exclude file (containing host2) is added to
     * the configuration and the node list refreshed, host1 must stay NORMAL,
     * host2 must be SHUTDOWN, and the decommissioned-NM metric must grow by
     * exactly one.
     */
    @Test public void testAddNewExcludePathToConfiguration() throws Exception { Configuration conf=new Configuration(); rm=new MockRM(conf); rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:5678",10240); ClusterMetrics metrics=ClusterMetrics.getMetrics(); assert (metrics != null); int initialMetricCount=metrics.getNumDecommisionedNMs(); NodeHeartbeatResponse nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction()); nodeHeartbeat=nm2.nodeHeartbeat(true); Assert.assertEquals(NodeAction.NORMAL,nodeHeartbeat.getNodeAction()); writeToHostsFile("host2"); conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,hostFile.getAbsolutePath()); rm.getNodesListManager().refreshNodes(conf); nodeHeartbeat=nm1.nodeHeartbeat(true); Assert.assertEquals("Node should not have been decomissioned.",NodeAction.NORMAL,nodeHeartbeat.getNodeAction()); nodeHeartbeat=nm2.nodeHeartbeat(true); Assert.assertEquals("Node should have been decomissioned but is in state" + nodeHeartbeat.getNodeAction(),NodeAction.SHUTDOWN,nodeHeartbeat.getNodeAction()); checkDecommissionedNMCount(rm,++initialMetricCount); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With RM_NODEMANAGER_MINIMUM_VERSION set to "EqualToRM", a NodeManager
     * registering with version 1.9.9 (below the RM's) must be refused: the
     * response carries SHUTDOWN and a diagnostics message containing
     * "Disallowed NodeManager Version 1.9.9, is less than the minimum version".
     */
    @Test public void testNodeRegistrationVersionLessThanRM() throws Exception { writeToHostsFile("host2"); Configuration conf=new Configuration(); conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,hostFile.getAbsolutePath()); conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION,"EqualToRM"); rm=new MockRM(conf); rm.start(); String nmVersion="1.9.9"; ResourceTrackerService resourceTrackerService=rm.getResourceTrackerService(); RegisterNodeManagerRequest req=Records.newRecord(RegisterNodeManagerRequest.class); NodeId nodeId=NodeId.newInstance("host2",1234); Resource capability=BuilderUtils.newResource(1024,1); req.setResource(capability); req.setNodeId(nodeId); req.setHttpPort(1234); req.setNMVersion(nmVersion); RegisterNodeManagerResponse response=resourceTrackerService.registerNodeManager(req); Assert.assertEquals(NodeAction.SHUTDOWN,response.getNodeAction()); Assert.assertTrue("Diagnostic message did not contain: 'Disallowed NodeManager " + "Version " + nmVersion + ", is less than the minimum version'",response.getDiagnosticsMessage().contains("Disallowed NodeManager Version " + nmVersion + ", is less than the minimum version ")); }

    EqualityVerifier 
    /**
     * An unhealthy heartbeat must move the node into the unhealthy set
     * (metric becomes 1); a later healthy heartbeat must move it back out
     * (metric returns to 0).
     */
    @Test
    public void testUnhealthyNodeStatus() throws Exception {
      Configuration conf = new Configuration();
      conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, hostFile.getAbsolutePath());
      rm = new MockRM(conf);
      rm.start();

      MockNM node = rm.registerNode("host1:1234", 5120);
      Assert.assertEquals(0, ClusterMetrics.getMetrics().getUnhealthyNMs());

      // Healthy, then unhealthy: the node is counted as unhealthy.
      node.nodeHeartbeat(true);
      node.nodeHeartbeat(false);
      checkUnealthyNMCount(rm, node, true, 1);

      // Healthy again: the node leaves the unhealthy set.
      node.nodeHeartbeat(true);
      checkUnealthyNMCount(rm, node, false, 0);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A NodeManager that is listed in the include file and reports the RM's
     * own YARN version registers successfully: the RM answers with NORMAL.
     */
    @Test
    public void testNodeRegistrationSuccess() throws Exception {
      writeToHostsFile("host2");
      Configuration conf = new Configuration();
      conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
      rm = new MockRM(conf);
      rm.start();

      RegisterNodeManagerRequest request =
          Records.newRecord(RegisterNodeManagerRequest.class);
      request.setResource(BuilderUtils.newResource(1024, 1));
      request.setNodeId(NodeId.newInstance("host2", 1234));
      request.setHttpPort(1234);
      // Same version as the RM, so the version check passes.
      request.setNMVersion(YarnVersionInfo.getVersion());

      RegisterNodeManagerResponse reply =
          rm.getResourceTrackerService().registerNodeManager(request);
      Assert.assertEquals(NodeAction.NORMAL, reply.getNodeAction());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Node reconnection: a node that re-registers (same host:port) must not
     * change the active-NM count, an unhealthy node stays counted unhealthy
     * across re-registration until it heartbeats healthy again, and a node
     * re-registering with a larger capability (10240 vs 5120) must update the
     * scheduler's available-MB metric accordingly. Uses a DrainDispatcher and
     * a synchronous scheduler-event dispatcher so each step can be awaited
     * deterministically.
     *
     * NOTE(review): {@code EventHandler} appears here as a raw type — the
     * generic parameter was likely stripped when this source was extracted;
     * confirm against the original file.
     */
    @Test public void testReconnectNode() throws Exception { final DrainDispatcher dispatcher=new DrainDispatcher(); rm=new MockRM(){ @Override protected EventHandler createSchedulerEventDispatcher(){ return new SchedulerEventDispatcher(this.scheduler){ @Override public void handle( SchedulerEvent event){ scheduler.handle(event); } } ; } @Override protected Dispatcher createDispatcher(){ return dispatcher; } } ; rm.start(); MockNM nm1=rm.registerNode("host1:1234",5120); MockNM nm2=rm.registerNode("host2:5678",5120); nm1.nodeHeartbeat(true); nm2.nodeHeartbeat(false); dispatcher.await(); checkUnealthyNMCount(rm,nm2,true,1); final int expectedNMs=ClusterMetrics.getMetrics().getNumActiveNMs(); QueueMetrics metrics=rm.getResourceScheduler().getRootQueueMetrics(); Assert.assertEquals(5120,metrics.getAvailableMB()); nm1=rm.registerNode("host1:1234",5120); NodeHeartbeatResponse response=nm1.nodeHeartbeat(true); Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); dispatcher.await(); Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs()); checkUnealthyNMCount(rm,nm2,true,1); nm2=rm.registerNode("host2:5678",5120); response=nm2.nodeHeartbeat(false); Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); dispatcher.await(); Assert.assertEquals(expectedNMs,ClusterMetrics.getMetrics().getNumActiveNMs()); checkUnealthyNMCount(rm,nm2,true,1); nm2=rm.registerNode("host2:5678",5120); dispatcher.await(); response=nm2.nodeHeartbeat(true); response=nm2.nodeHeartbeat(true); dispatcher.await(); Assert.assertEquals(5120 + 5120,metrics.getAvailableMB()); nm1=rm.registerNode("host2:5678",10240); dispatcher.await(); response=nm1.nodeHeartbeat(true); dispatcher.await(); Assert.assertTrue(NodeAction.NORMAL.equals(response.getNodeAction())); Assert.assertEquals(5120 + 10240,metrics.getAvailableMB()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestSubmitApplicationWithRMHA

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * RM HA failover during application submission, with the application state
     * already saved: after explicit failover, rm2 must already know app0, and
     * re-submitting with app0's ApplicationId must be idempotent — the
     * returned application carries the same id rather than a new one.
     */
    @Test(timeout=5000) public void testHandleRMHADuringSubmitApplicationCallWithSavedApplicationState() throws Exception { startRMs(); RMApp app0=rm1.submitApp(200); explicitFailover(); Assert.assertTrue(rm2.getRMContext().getRMApps().containsKey(app0.getApplicationId())); RMApp app1=rm2.submitApp(200,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,configuration.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS),null,null,false,false,true,app0.getApplicationId()); Assert.assertEquals(app1.getApplicationId(),app0.getApplicationId()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * getApplicationReport must be idempotent: two consecutive calls on rm1
     * return the same application id and YARN state (ACCEPTED or SUBMITTED),
     * and after an explicit HA failover two calls on rm2 still return a report
     * consistent with rm1's — same id, same state.
     */
    @Test public void testGetApplicationReportIdempotent() throws Exception { startRMs(); RMApp app=rm1.submitApp(200); ApplicationReport appReport1=rm1.getApplicationReport(app.getApplicationId()); Assert.assertTrue(appReport1.getYarnApplicationState() == YarnApplicationState.ACCEPTED || appReport1.getYarnApplicationState() == YarnApplicationState.SUBMITTED); ApplicationReport appReport2=rm1.getApplicationReport(app.getApplicationId()); Assert.assertEquals(appReport1.getApplicationId(),appReport2.getApplicationId()); Assert.assertEquals(appReport1.getYarnApplicationState(),appReport2.getYarnApplicationState()); explicitFailover(); ApplicationReport appReport3=rm2.getApplicationReport(app.getApplicationId()); Assert.assertEquals(appReport1.getApplicationId(),appReport3.getApplicationId()); Assert.assertEquals(appReport1.getYarnApplicationState(),appReport3.getYarnApplicationState()); ApplicationReport appReport4=rm2.getApplicationReport(app.getApplicationId()); Assert.assertEquals(appReport3.getApplicationId(),appReport4.getApplicationId()); Assert.assertEquals(appReport3.getYarnApplicationState(),appReport4.getYarnApplicationState()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.TestWorkPreservingRMRestart

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Work-preserving RM restart must NOT recover containers that belong to an
     * already-FINISHED application: after rm2 restarts from the same state
     * store and the NM re-registers reporting one RUNNING and one COMPLETE
     * container of the finished app, the scheduler must know neither of them.
     *
     * NOTE(review): the bare Thread.sleep(3000) gives the scheduler time to
     * (not) recover the containers; a condition-based wait would make this
     * less timing-sensitive — confirm whether a helper exists for that.
     */
    @Test(timeout=20000) public void testContainersNotRecoveredForCompletedApps() throws Exception { MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am1); rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE); nm1.registerNode(Arrays.asList(runningContainer,completedContainer),null); RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); assertEquals(RMAppState.FINISHED,recoveredApp1.getState()); Thread.sleep(3000); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler(); assertNull(scheduler.getRMContainer(runningContainer.getContainerId())); assertNull(scheduler.getRMContainer(completedContainer.getContainerId())); }

    APIUtilityVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Full scheduler-state recovery on work-preserving RM restart: after rm2
     * restarts from the same state store and the NM re-registers with an AM
     * container and one running container (plus one completed), the scheduler
     * node must hold exactly those two live containers, its used/available
     * resources must reflect two container allocations, the recovered
     * SchedulerApplicationAttempt must own both RMContainers with matching
     * current consumption and headroom, and the next container id must resume
     * past the epoch boundary ((1 << 22) + 1). Queue checks branch on the
     * parameterized scheduler class (CapacityScheduler vs FifoScheduler).
     *
     * NOTE(review): {@code Set launchedContainers} and {@code Map
     * schedulerApps} appear as raw types — generic parameters were likely
     * stripped when this source was extracted; confirm against the original.
     */
    @Test(timeout=20000) public void testSchedulerRecovery() throws Exception { conf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS,true); conf.set(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,DominantResourceCalculator.class.getName()); int containerMemory=1024; Resource containerResource=Resource.newInstance(containerMemory,1); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8192,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); rm1.clearQueueMetrics(app1); rm2=new MockRM(conf,memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); RMApp recoveredApp1=rm2.getRMContext().getRMApps().get(app1.getApplicationId()); RMAppAttempt loadedAttempt1=recoveredApp1.getCurrentAppAttempt(); NMContainerStatus amContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),1,ContainerState.RUNNING); NMContainerStatus runningContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); NMContainerStatus completedContainer=TestRMRestart.createNMContainerStatus(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE); nm1.registerNode(Arrays.asList(amContainer,runningContainer,completedContainer),null); waitForNumContainersToRecover(2,rm2,am1.getApplicationAttemptId()); Set launchedContainers=((RMNodeImpl)rm2.getRMContext().getRMNodes().get(nm1.getNodeId())).getLaunchedContainers(); assertTrue(launchedContainers.contains(amContainer.getContainerId())); assertTrue(launchedContainers.contains(runningContainer.getContainerId())); rm2.waitForState(nm1,amContainer.getContainerId(),RMContainerState.RUNNING); rm2.waitForState(nm1,runningContainer.getContainerId(),RMContainerState.RUNNING); rm2.waitForContainerToComplete(loadedAttempt1,completedContainer); AbstractYarnScheduler 
scheduler=(AbstractYarnScheduler)rm2.getResourceScheduler(); SchedulerNode schedulerNode1=scheduler.getSchedulerNode(nm1.getNodeId()); Resource usedResources=Resources.multiply(containerResource,2); Resource nmResource=Resource.newInstance(nm1.getMemory(),nm1.getvCores()); assertTrue(schedulerNode1.isValidContainer(amContainer.getContainerId())); assertTrue(schedulerNode1.isValidContainer(runningContainer.getContainerId())); assertFalse(schedulerNode1.isValidContainer(completedContainer.getContainerId())); assertEquals(2,schedulerNode1.getNumContainers()); assertEquals(Resources.subtract(nmResource,usedResources),schedulerNode1.getAvailableResource()); assertEquals(usedResources,schedulerNode1.getUsedResource()); Resource availableResources=Resources.subtract(nmResource,usedResources); Map schedulerApps=((AbstractYarnScheduler)rm2.getResourceScheduler()).getSchedulerApplications(); SchedulerApplication schedulerApp=schedulerApps.get(recoveredApp1.getApplicationId()); if (schedulerClass.equals(CapacityScheduler.class)) { checkCSQueue(rm2,schedulerApp,nmResource,nmResource,usedResources,2); } else if (schedulerClass.equals(FifoScheduler.class)) { checkFifoQueue(schedulerApp,usedResources,availableResources); } SchedulerApplicationAttempt schedulerAttempt=schedulerApp.getCurrentAppAttempt(); assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(amContainer.getContainerId()))); assertTrue(schedulerAttempt.getLiveContainers().contains(scheduler.getRMContainer(runningContainer.getContainerId()))); assertEquals(schedulerAttempt.getCurrentConsumption(),usedResources); if (scheduler.getClass() != FairScheduler.class) { assertEquals(availableResources,schedulerAttempt.getHeadroom()); } assertEquals((1 << 22) + 1,schedulerAttempt.getNewContainerId()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.ahs.TestRMApplicationHistoryWriter

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The history writer persists container lifecycle events asynchronously:
     * after containerStarted, the store must (within MAX_RETRIES polls of
     * 100 ms) yield ContainerHistoryData with the node, resource, priority and
     * creation time supplied by createRMContainer; after containerFinished,
     * the stored record must additionally carry the diagnostics, exit status
     * and COMPLETE state.
     */
    @Test public void testWriteContainer() throws Exception { RMContainer container=createRMContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1)); writer.containerStarted(container); ContainerHistoryData containerHD=null; for (int i=0; i < MAX_RETRIES; ++i) { containerHD=store.getContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1)); if (containerHD != null) { break; } else { Thread.sleep(100); } } Assert.assertNotNull(containerHD); Assert.assertEquals(NodeId.newInstance("test host",-100),containerHD.getAssignedNode()); Assert.assertEquals(Resource.newInstance(-1,-1),containerHD.getAllocatedResource()); Assert.assertEquals(Priority.UNDEFINED,containerHD.getPriority()); Assert.assertEquals(0L,container.getCreationTime()); writer.containerFinished(container); for (int i=0; i < MAX_RETRIES; ++i) { containerHD=store.getContainer(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1)); if (containerHD.getContainerState() != null) { break; } else { Thread.sleep(100); } } Assert.assertEquals("test diagnostics info",containerHD.getDiagnosticsInfo()); Assert.assertEquals(-1,containerHD.getContainerExitStatus()); Assert.assertEquals(ContainerState.COMPLETE,containerHD.getContainerState()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The history writer persists application-attempt lifecycle events
     * asynchronously: after applicationAttemptStarted, the store must (within
     * MAX_RETRIES polls of 100 ms) yield ApplicationAttemptHistoryData with
     * the host, RPC port and master container id from createRMAppAttempt;
     * after applicationAttemptFinished(FINISHED), the stored record must also
     * carry the diagnostics, tracking URL, final status and FINISHED state.
     */
    @Test public void testWriteApplicationAttempt() throws Exception { RMAppAttempt appAttempt=createRMAppAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1)); writer.applicationAttemptStarted(appAttempt); ApplicationAttemptHistoryData appAttemptHD=null; for (int i=0; i < MAX_RETRIES; ++i) { appAttemptHD=store.getApplicationAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1)); if (appAttemptHD != null) { break; } else { Thread.sleep(100); } } Assert.assertNotNull(appAttemptHD); Assert.assertEquals("test host",appAttemptHD.getHost()); Assert.assertEquals(-100,appAttemptHD.getRPCPort()); Assert.assertEquals(ContainerId.newInstance(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1),1),appAttemptHD.getMasterContainerId()); writer.applicationAttemptFinished(appAttempt,RMAppAttemptState.FINISHED); for (int i=0; i < MAX_RETRIES; ++i) { appAttemptHD=store.getApplicationAttempt(ApplicationAttemptId.newInstance(ApplicationId.newInstance(0,1),1)); if (appAttemptHD.getYarnApplicationAttemptState() != null) { break; } else { Thread.sleep(100); } } Assert.assertEquals("test diagnostics info",appAttemptHD.getDiagnosticsInfo()); Assert.assertEquals("test url",appAttemptHD.getTrackingURL()); Assert.assertEquals(FinalApplicationStatus.UNDEFINED,appAttemptHD.getFinalApplicationStatus()); Assert.assertEquals(YarnApplicationAttemptState.FINISHED,appAttemptHD.getYarnApplicationAttemptState()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * The history writer persists application lifecycle events asynchronously:
     * after applicationStarted, the store must (within MAX_RETRIES polls of
     * 100 ms) yield ApplicationHistoryData with the name, type, user, queue,
     * submit time and start time from createRMApp; after
     * applicationFinished(FINISHED), the stored record must also carry the
     * finish time, diagnostics, final status and FINISHED state.
     */
    @Test public void testWriteApplication() throws Exception { RMApp app=createRMApp(ApplicationId.newInstance(0,1)); writer.applicationStarted(app); ApplicationHistoryData appHD=null; for (int i=0; i < MAX_RETRIES; ++i) { appHD=store.getApplication(ApplicationId.newInstance(0,1)); if (appHD != null) { break; } else { Thread.sleep(100); } } Assert.assertNotNull(appHD); Assert.assertEquals("test app",appHD.getApplicationName()); Assert.assertEquals("test app type",appHD.getApplicationType()); Assert.assertEquals("test user",appHD.getUser()); Assert.assertEquals("test queue",appHD.getQueue()); Assert.assertEquals(0L,appHD.getSubmitTime()); Assert.assertEquals(1L,appHD.getStartTime()); writer.applicationFinished(app,RMAppState.FINISHED); for (int i=0; i < MAX_RETRIES; ++i) { appHD=store.getApplication(ApplicationId.newInstance(0,1)); if (appHD.getYarnApplicationState() != null) { break; } else { Thread.sleep(100); } } Assert.assertEquals(2L,appHD.getFinishTime()); Assert.assertEquals("test diagnostics info",appHD.getDiagnosticsInfo()); Assert.assertEquals(FinalApplicationStatus.UNDEFINED,appHD.getFinalApplicationStatus()); Assert.assertEquals(YarnApplicationState.FINISHED,appHD.getYarnApplicationState()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRMRPCNodeUpdates

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * AMs must be told about nodes that become unusable (and usable again)
     * through the updatedNodes field of the allocate response: nm4 turning
     * UNHEALTHY and nm3 being LOST each show up exactly once in the first
     * app's subsequent allocate (and a retransmitted request repeats the same
     * update); a second app registered afterwards starts with no updates but
     * is notified when nm4 returns to RUNNING, after which its next allocate
     * reports nothing new.
     */
    @Test public void testAMRMUnusableNodes() throws Exception { MockNM nm1=rm.registerNode("127.0.0.1:1234",10000); MockNM nm2=rm.registerNode("127.0.0.2:1234",10000); MockNM nm3=rm.registerNode("127.0.0.3:1234",10000); MockNM nm4=rm.registerNode("127.0.0.4:1234",10000); dispatcher.await(); RMApp app1=rm.submitApp(2000); nm1.nodeHeartbeat(true); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId()); am1.registerAppAttempt(); AllocateRequest allocateRequest1=AllocateRequest.newInstance(0,0F,null,null,null); AllocateResponse response1=allocate(attempt1.getAppAttemptId(),allocateRequest1); List updatedNodes=response1.getUpdatedNodes(); Assert.assertEquals(0,updatedNodes.size()); syncNodeHeartbeat(nm4,false); allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null); response1=allocate(attempt1.getAppAttemptId(),allocateRequest1); updatedNodes=response1.getUpdatedNodes(); Assert.assertEquals(1,updatedNodes.size()); NodeReport nr=updatedNodes.iterator().next(); Assert.assertEquals(nm4.getNodeId(),nr.getNodeId()); Assert.assertEquals(NodeState.UNHEALTHY,nr.getNodeState()); response1=allocate(attempt1.getAppAttemptId(),allocateRequest1); updatedNodes=response1.getUpdatedNodes(); Assert.assertEquals(1,updatedNodes.size()); nr=updatedNodes.iterator().next(); Assert.assertEquals(nm4.getNodeId(),nr.getNodeId()); Assert.assertEquals(NodeState.UNHEALTHY,nr.getNodeState()); syncNodeLost(nm3); allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null); response1=allocate(attempt1.getAppAttemptId(),allocateRequest1); updatedNodes=response1.getUpdatedNodes(); Assert.assertEquals(1,updatedNodes.size()); nr=updatedNodes.iterator().next(); Assert.assertEquals(nm3.getNodeId(),nr.getNodeId()); Assert.assertEquals(NodeState.LOST,nr.getNodeState()); RMApp app2=rm.submitApp(2000); nm2.nodeHeartbeat(true); RMAppAttempt attempt2=app2.getCurrentAppAttempt(); MockAM 
am2=rm.sendAMLaunched(attempt2.getAppAttemptId()); am2.registerAppAttempt(); AllocateRequest allocateRequest2=AllocateRequest.newInstance(0,0F,null,null,null); AllocateResponse response2=allocate(attempt2.getAppAttemptId(),allocateRequest2); updatedNodes=response2.getUpdatedNodes(); Assert.assertEquals(0,updatedNodes.size()); syncNodeHeartbeat(nm4,true); allocateRequest1=AllocateRequest.newInstance(response1.getResponseId(),0F,null,null,null); response1=allocate(attempt1.getAppAttemptId(),allocateRequest1); updatedNodes=response1.getUpdatedNodes(); Assert.assertEquals(1,updatedNodes.size()); nr=updatedNodes.iterator().next(); Assert.assertEquals(nm4.getNodeId(),nr.getNodeId()); Assert.assertEquals(NodeState.RUNNING,nr.getNodeState()); allocateRequest2=AllocateRequest.newInstance(response2.getResponseId(),0F,null,null,null); response2=allocate(attempt2.getAppAttemptId(),allocateRequest2); updatedNodes=response2.getUpdatedNodes(); Assert.assertEquals(1,updatedNodes.size()); nr=updatedNodes.iterator().next(); Assert.assertEquals(nm4.getNodeId(),nr.getNodeId()); Assert.assertEquals(NodeState.RUNNING,nr.getNodeState()); allocateRequest2=AllocateRequest.newInstance(response2.getResponseId(),0F,null,null,null); response2=allocate(attempt2.getAppAttemptId(),allocateRequest2); updatedNodes=response2.getUpdatedNodes(); Assert.assertEquals(0,updatedNodes.size()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRMRPCResponseId

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Allocate-call response ids: each in-sync allocate advances the response
     * id by one; retransmitting the identical request is idempotent (same id
     * comes back, no re-processing); and an out-of-sync request (reusing id 0)
     * makes the RM command the AM to resync.
     */
    @Test
    public void testARRMResponseId() throws Exception {
      MockNM nm1 = rm.registerNode("h1:1234", 5000);
      RMApp app = rm.submitApp(2000);
      nm1.nodeHeartbeat(true);
      RMAppAttempt attempt = app.getCurrentAppAttempt();
      MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
      am.registerAppAttempt();

      // First allocate (id 0): answered with id 1 and no AM command.
      AllocateRequest request = AllocateRequest.newInstance(0, 0F, null, null, null);
      AllocateResponse response = allocate(attempt.getAppAttemptId(), request);
      Assert.assertEquals(1, response.getResponseId());
      Assert.assertTrue(response.getAMCommand() == null);

      // Follow-up using the id we were handed: id advances to 2.
      request = AllocateRequest.newInstance(response.getResponseId(), 0F, null, null, null);
      response = allocate(attempt.getAppAttemptId(), request);
      Assert.assertEquals(2, response.getResponseId());

      // Retransmitting the very same request must not advance the id.
      response = allocate(attempt.getAppAttemptId(), request);
      Assert.assertEquals(2, response.getResponseId());

      // A stale id (0 again) is out of sync: the AM is told to resync.
      request = AllocateRequest.newInstance(0, 0F, null, null, null);
      response = allocate(attempt.getAppAttemptId(), request);
      Assert.assertTrue(response.getAMCommand() == AMCommand.AM_RESYNC);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRestart

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * AM restart with work-preserving container handover: when the AM
     * container (id 1) completes and the attempt fails, the RUNNING containers
     * (ids 2, 3) must survive into the next attempt and be reported to the new
     * AM via getContainersFromPreviousAttempts, while ACQUIRED (4), ALLOCATED
     * (5) and RESERVED (6) containers are released (no longer known to the
     * scheduler). Containers 3-6 must then surface in the new attempt's
     * just-finished list, and finishing the new AM completes container 2 as
     * the fifth finished container.
     *
     * NOTE(review): the final
     * {@code assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2))}
     * compares a collection of RMContainer objects against a ContainerId, so
     * it appears to be trivially true regardless of state — presumably the
     * intent was to check no live container HAS that id; confirm against the
     * upstream test before relying on this assertion.
     */
    @Test(timeout=30000) public void testAMRestartWithExistingContainers() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,2); MockRM rm1=new MockRM(conf); rm1.start(); RMApp app1=rm1.submitApp(200,"name","user",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true); MockNM nm1=new MockNM("127.0.0.1:1234",10240,rm1.getResourceTrackerService()); nm1.registerNode(); MockNM nm2=new MockNM("127.0.0.1:2351",4089,rm1.getResourceTrackerService()); nm2.registerNode(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); int NUM_CONTAINERS=3; am1.allocate("127.0.0.1",1024,NUM_CONTAINERS,new ArrayList()); nm1.nodeHeartbeat(true); List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); while (containers.size() != NUM_CONTAINERS) { nm1.nodeHeartbeat(true); containers.addAll(am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers()); Thread.sleep(200); } nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING); ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING); ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3); rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING); ContainerId containerId4=ContainerId.newInstance(am1.getApplicationAttemptId(),4); rm1.waitForState(nm1,containerId4,RMContainerState.ACQUIRED); am1.allocate("127.0.0.1",1024,1,new ArrayList()); nm1.nodeHeartbeat(true); ContainerId containerId5=ContainerId.newInstance(am1.getApplicationAttemptId(),5); rm1.waitForContainerAllocated(nm1,containerId5); rm1.waitForState(nm1,containerId5,RMContainerState.ALLOCATED); am1.allocate("127.0.0.1",6000,1,new ArrayList()); ContainerId containerId6=ContainerId.newInstance(am1.getApplicationAttemptId(),6); nm1.nodeHeartbeat(true); 
SchedulerApplicationAttempt schedulerAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId6); while (schedulerAttempt.getReservedContainers().isEmpty()) { System.out.println("Waiting for container " + containerId6 + " to be reserved."); nm1.nodeHeartbeat(true); Thread.sleep(200); } Assert.assertEquals(containerId6,schedulerAttempt.getReservedContainers().get(0).getContainerId()); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE); am1.waitForState(RMAppAttemptState.FAILED); Thread.sleep(3000); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId4)); Assert.assertNull(rm1.getResourceScheduler().getRMContainer(containerId5)); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); ApplicationAttemptId newAttemptId=app1.getCurrentAppAttempt().getAppAttemptId(); Assert.assertFalse(newAttemptId.equals(am1.getApplicationAttemptId())); RMAppAttempt attempt2=app1.getCurrentAppAttempt(); nm1.nodeHeartbeat(true); MockAM am2=rm1.sendAMLaunched(attempt2.getAppAttemptId()); RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt(); Assert.assertEquals(2,registerResponse.getContainersFromPreviousAttempts().size()); boolean containerId2Exists=false, containerId3Exists=false; for ( Container container : registerResponse.getContainersFromPreviousAttempts()) { if (container.getId().equals(containerId2)) { containerId2Exists=true; } if (container.getId().equals(containerId3)) { containerId3Exists=true; } } Assert.assertTrue(containerId2Exists && containerId3Exists); rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING); nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.COMPLETE); RMAppAttempt newAttempt=app1.getRMAppAttempt(am2.getApplicationAttemptId()); waitForContainersToFinish(4,newAttempt); boolean container3Exists=false, container4Exists=false, container5Exists=false, 
container6Exists=false; for ( ContainerStatus status : newAttempt.getJustFinishedContainers()) { if (status.getContainerId().equals(containerId3)) { container3Exists=true; } if (status.getContainerId().equals(containerId4)) { container4Exists=true; } if (status.getContainerId().equals(containerId5)) { container5Exists=true; } if (status.getContainerId().equals(containerId6)) { container6Exists=true; } } Assert.assertTrue(container3Exists && container4Exists && container5Exists&& container6Exists); rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING); SchedulerApplicationAttempt schedulerNewAttempt=((AbstractYarnScheduler)rm1.getResourceScheduler()).getCurrentAttemptForContainer(containerId2); MockRM.finishAMAndVerifyAppState(app1,rm1,nm1,am2); Assert.assertFalse(schedulerNewAttempt.getLiveContainers().contains(containerId2)); System.out.println("New attempt's just finished containers: " + newAttempt.getJustFinishedContainers()); waitForContainersToFinish(5,newAttempt); rm1.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that NMTokens issued to earlier AM attempts are transferred to
     * later attempts: each restarted attempt's RegisterApplicationMasterResponse
     * must carry the NMTokens accumulated so far.
     */
    // NOTE(review): the collections below use raw types (List/ArrayList/HashMap);
    // parameterized types (e.g. List<NMToken>) would be preferable — confirm the
    // element types against the AllocateResponse/MockRM APIs before changing.
    @Test(timeout=30000) public void testNMTokensRebindOnAMRestart() throws Exception {
        YarnConfiguration conf=new YarnConfiguration();
        // Allow up to three attempts so the app survives two AM failures below.
        conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,3);
        MockRM rm1=new MockRM(conf);
        rm1.start();
        RMApp app1=rm1.submitApp(200,"myname","myuser",new HashMap(),false,"default",-1,null,"MAPREDUCE",false,true);
        // Two NMs so the second attempt can also allocate on a different node.
        MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
        nm1.registerNode();
        MockNM nm2=new MockNM("127.1.1.1:4321",8000,rm1.getResourceTrackerService());
        nm2.registerNode();
        MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
        List containers=new ArrayList();
        List expectedNMTokens=new ArrayList();
        // Poll until the first attempt holds 2 containers, collecting every
        // NMToken handed out along the way.
        while (true) {
            AllocateResponse response=am1.allocate("127.0.0.1",2000,2,new ArrayList());
            nm1.nodeHeartbeat(true);
            containers.addAll(response.getAllocatedContainers());
            expectedNMTokens.addAll(response.getNMTokens());
            if (containers.size() == 2) {
                break;
            }
            Thread.sleep(200);
            System.out.println("Waiting for container to be allocated.");
        }
        // Run containers 2 and 3, then complete the AM container (id 1) to
        // fail the first attempt.
        nm1.nodeHeartbeat(am1.getApplicationAttemptId(),2,ContainerState.RUNNING);
        ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
        rm1.waitForState(nm1,containerId2,RMContainerState.RUNNING);
        nm1.nodeHeartbeat(am1.getApplicationAttemptId(),3,ContainerState.RUNNING);
        ContainerId containerId3=ContainerId.newInstance(am1.getApplicationAttemptId(),3);
        rm1.waitForState(nm1,containerId3,RMContainerState.RUNNING);
        nm1.nodeHeartbeat(am1.getApplicationAttemptId(),1,ContainerState.COMPLETE);
        am1.waitForState(RMAppAttemptState.FAILED);
        rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
        // The second attempt must receive exactly the tokens issued to the first.
        MockAM am2=MockRM.launchAM(app1,rm1,nm1);
        RegisterApplicationMasterResponse registerResponse=am2.registerAppAttempt();
        rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
        Assert.assertEquals(expectedNMTokens,registerResponse.getNMTokensFromPreviousAttempts());
        containers=new ArrayList();
        // Allocate on nm2 during the second attempt; its NMToken joins the
        // expected set (size asserted as 2 at the end).
        while (true) {
            AllocateResponse allocateResponse=am2.allocate("127.1.1.1",4000,1,new ArrayList());
            nm2.nodeHeartbeat(true);
            containers.addAll(allocateResponse.getAllocatedContainers());
            expectedNMTokens.addAll(allocateResponse.getNMTokens());
            if (containers.size() == 1) {
                break;
            }
            Thread.sleep(200);
            System.out.println("Waiting for container to be allocated.");
        }
        nm1.nodeHeartbeat(am2.getApplicationAttemptId(),2,ContainerState.RUNNING);
        ContainerId am2ContainerId2=ContainerId.newInstance(am2.getApplicationAttemptId(),2);
        rm1.waitForState(nm1,am2ContainerId2,RMContainerState.RUNNING);
        // Fail the second AM the same way.
        nm1.nodeHeartbeat(am2.getApplicationAttemptId(),1,ContainerState.COMPLETE);
        am2.waitForState(RMAppAttemptState.FAILED);
        rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
        // The third attempt must see the union of tokens from both prior attempts.
        MockAM am3=MockRM.launchAM(app1,rm1,nm1);
        registerResponse=am3.registerAppAttempt();
        rm1.waitForState(app1.getApplicationId(),RMAppState.RUNNING);
        List transferredTokens=registerResponse.getNMTokensFromPreviousAttempts();
        Assert.assertEquals(2,transferredTokens.size());
        Assert.assertTrue(transferredTokens.containsAll(expectedNMTokens));
        rm1.stop();
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that AM preemption is not counted towards the max-attempt limit
     * across an RM restart: with max attempts = 1, a preempted first attempt
     * still leaves room for a second attempt after recovery, and the PREEMPTED
     * exit status is persisted in the state store.
     */
    @Test(timeout=20000) public void testPreemptedAMRestartOnRMRestart() throws Exception {
        YarnConfiguration conf=new YarnConfiguration();
        conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
        conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
        conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
        // Only one counted failure allowed; preemption must not consume it.
        conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
        MemoryRMStateStore memStore=new MemoryRMStateStore();
        memStore.init(conf);
        MockRM rm1=new MockRM(conf,memStore);
        rm1.start();
        MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
        nm1.registerNode();
        RMApp app1=rm1.submitApp(200);
        RMAppAttempt attempt1=app1.getCurrentAppAttempt();
        MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
        CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler();
        // Kill the AM container (id 1) through the scheduler; the PREEMPTED
        // exit status asserted below shows this is recorded as preemption.
        ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1);
        scheduler.killContainer(scheduler.getRMContainer(amContainer));
        am1.waitForState(RMAppAttemptState.FAILED);
        Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry());
        rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
        ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
        Assert.assertEquals(1,appState.getAttemptCount());
        // Persisted exit status of the preempted attempt must be PREEMPTED.
        Assert.assertEquals(ContainerExitStatus.PREEMPTED,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
        // Restart the RM from the same state store and re-register the NM.
        MockRM rm2=new MockRM(conf,memStore);
        nm1.setResourceTrackerService(rm2.getResourceTrackerService());
        nm1.registerNode();
        rm2.start();
        // A second attempt is still allowed despite max-attempts = 1.
        MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
        MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
        RMAppAttempt attempt2=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
        // A normally-finished attempt does count towards the retry limit.
        Assert.assertTrue(attempt2.shouldCountTowardsMaxAttemptRetry());
        // Successful attempt leaves the INVALID (unset) exit status in the store.
        Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
        rm1.stop();
        rm2.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=100000) public void testShouldNotCountFailureToMaxAttemptRetry() throws Exception { YarnConfiguration conf=new YarnConfiguration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1); conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true); conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName()); MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); MockRM rm1=new MockRM(conf,memStore); rm1.start(); MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService()); nm1.registerNode(); RMApp app1=rm1.submitApp(200); RMAppAttempt attempt1=app1.getCurrentAppAttempt(); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); CapacityScheduler scheduler=(CapacityScheduler)rm1.getResourceScheduler(); ContainerId amContainer=ContainerId.newInstance(am1.getApplicationAttemptId(),1); scheduler.killContainer(scheduler.getRMContainer(amContainer)); am1.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt1.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId()); MockAM am2=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1); RMAppAttempt attempt2=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt2).mayBeLastAttempt()); ContainerId amContainer2=ContainerId.newInstance(am2.getApplicationAttemptId(),1); scheduler.killContainer(scheduler.getRMContainer(amContainer2)); am2.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt2.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am3=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),3,nm1); RMAppAttempt attempt3=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt3).mayBeLastAttempt()); 
ContainerStatus containerStatus=Records.newRecord(ContainerStatus.class); containerStatus.setContainerId(attempt3.getMasterContainer().getId()); containerStatus.setDiagnostics("mimic NM disk_failure"); containerStatus.setState(ContainerState.COMPLETE); containerStatus.setExitStatus(ContainerExitStatus.DISKS_FAILED); Map> conts=new HashMap>(); conts.put(app1.getApplicationId(),Collections.singletonList(containerStatus)); nm1.nodeHeartbeat(conts,true); am3.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt3.shouldCountTowardsMaxAttemptRetry()); Assert.assertEquals(ContainerExitStatus.DISKS_FAILED,appState.getAttempt(am3.getApplicationAttemptId()).getAMContainerExitStatus()); rm1.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED); MockAM am4=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),4,nm1); RMAppAttempt attempt4=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt4).mayBeLastAttempt()); MockNM nm2=new MockNM("127.0.0.1:2234",8000,rm1.getResourceTrackerService()); nm2.registerNode(); nm1.nodeHeartbeat(false); am4.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(!attempt4.shouldCountTowardsMaxAttemptRetry()); Assert.assertEquals(ContainerExitStatus.ABORTED,appState.getAttempt(am4.getApplicationAttemptId()).getAMContainerExitStatus()); nm2.nodeHeartbeat(true); MockAM am5=rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),5,nm2); RMAppAttempt attempt5=app1.getCurrentAppAttempt(); Assert.assertTrue(((RMAppAttemptImpl)attempt5).mayBeLastAttempt()); nm2.nodeHeartbeat(am5.getApplicationAttemptId(),1,ContainerState.COMPLETE); am5.waitForState(RMAppAttemptState.FAILED); Assert.assertTrue(attempt5.shouldCountTowardsMaxAttemptRetry()); rm1.waitForState(app1.getApplicationId(),RMAppState.FAILED); Assert.assertEquals(5,app1.getAppAttempts().size()); rm1.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that an AM killed because of an RM restart/failover
     * (KILLED_BY_RESOURCEMANAGER) is not counted towards the max-attempt
     * limit: with max attempts = 1, the recovered RM still launches a second
     * attempt, and only its normal completion counts.
     */
    @Test(timeout=50000) public void testRMRestartOrFailoverNotCountedForAMFailures() throws Exception {
        YarnConfiguration conf=new YarnConfiguration();
        conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class);
        conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED,true);
        conf.set(YarnConfiguration.RM_STORE,MemoryRMStateStore.class.getName());
        // One counted failure allowed; the restart-induced kill must be free.
        conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,1);
        MemoryRMStateStore memStore=new MemoryRMStateStore();
        memStore.init(conf);
        MockRM rm1=new MockRM(conf,memStore);
        rm1.start();
        MockNM nm1=new MockNM("127.0.0.1:1234",8000,rm1.getResourceTrackerService());
        nm1.registerNode();
        RMApp app1=rm1.submitApp(200);
        MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
        RMAppAttempt attempt1=app1.getCurrentAppAttempt();
        Assert.assertTrue(((RMAppAttemptImpl)attempt1).mayBeLastAttempt());
        // Second RM recovers from the same store (simulated failover).
        MockRM rm2=new MockRM(conf,memStore);
        rm2.start();
        ApplicationState appState=memStore.getState().getApplicationState().get(app1.getApplicationId());
        nm1.setResourceTrackerService(rm2.getResourceTrackerService());
        // NM re-registers reporting the old AM container as completed with
        // KILLED_BY_RESOURCEMANAGER (what happens to AMs across a restart).
        NMContainerStatus status=Records.newRecord(NMContainerStatus.class);
        status.setContainerExitStatus(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER);
        status.setContainerId(attempt1.getMasterContainer().getId());
        status.setContainerState(ContainerState.COMPLETE);
        status.setDiagnostics("");
        nm1.registerNode(Collections.singletonList(status),null);
        rm2.waitForState(attempt1.getAppAttemptId(),RMAppAttemptState.FAILED);
        // The killed-by-RM exit status must be persisted for attempt 1.
        Assert.assertEquals(ContainerExitStatus.KILLED_BY_RESOURCEMANAGER,appState.getAttempt(am1.getApplicationAttemptId()).getAMContainerExitStatus());
        rm2.waitForState(app1.getApplicationId(),RMAppState.ACCEPTED);
        // A second attempt is launched despite max-attempts = 1.
        MockAM am2=rm2.waitForNewAMToLaunchAndRegister(app1.getApplicationId(),2,nm1);
        MockRM.finishAMAndVerifyAppState(app1,rm2,nm1,am2);
        RMAppAttempt attempt3=rm2.getRMContext().getRMApps().get(app1.getApplicationId()).getCurrentAppAttempt();
        // A normally-finished attempt does count towards the retry limit.
        Assert.assertTrue(attempt3.shouldCountTowardsMaxAttemptRetry());
        // Successful attempt keeps the INVALID (unset) exit status in the store.
        Assert.assertEquals(ContainerExitStatus.INVALID,appState.getAttempt(am2.getApplicationAttemptId()).getAMContainerExitStatus());
        rm1.stop();
        rm2.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy

    InternalCallVerifier EqualityVerifier 
    @Test public void testExpireKill(){ final long killTime=10000L; int[][] qData=new int[][]{{100,40,40,20},{100,100,100,100},{100,0,60,40},{10,10,0,0},{0,0,0,0},{3,1,1,1},{-1,1,1,1},{3,0,0,0}}; conf.setLong(WAIT_TIME_BEFORE_KILL,killTime); ProportionalCapacityPreemptionPolicy policy=buildPolicy(qData); when(mClock.getTime()).thenReturn(0L); policy.editSchedule(); verify(mDisp,times(10)).handle(argThat(new IsPreemptionRequestFor(appC))); when(mClock.getTime()).thenReturn(killTime / 2); policy.editSchedule(); verify(mDisp,times(20)).handle(argThat(new IsPreemptionRequestFor(appC))); when(mClock.getTime()).thenReturn(killTime + 1); policy.editSchedule(); verify(mDisp,times(30)).handle(evtCaptor.capture()); List events=evtCaptor.getAllValues(); for ( ContainerPreemptEvent e : events.subList(20,30)) { assertEquals(appC,e.getAppId()); assertEquals(KILL_CONTAINER,e.getType()); } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.recovery.TestZKRMStateStore

    InternalCallVerifier EqualityVerifier 
    @SuppressWarnings("unchecked") @Test public void testFencing() throws Exception { StateChangeRequestInfo req=new StateChangeRequestInfo(HAServiceProtocol.RequestSource.REQUEST_BY_USER); Configuration conf1=createHARMConf("rm1,rm2","rm1",1234); conf1.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); ResourceManager rm1=new ResourceManager(); rm1.init(conf1); rm1.start(); rm1.getRMContext().getRMAdminService().transitionToActive(req); assertEquals("RM with ZKStore didn't start",Service.STATE.STARTED,rm1.getServiceState()); assertEquals("RM should be Active",HAServiceProtocol.HAServiceState.ACTIVE,rm1.getRMContext().getRMAdminService().getServiceStatus().getState()); Configuration conf2=createHARMConf("rm1,rm2","rm2",5678); conf2.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,false); ResourceManager rm2=new ResourceManager(); rm2.init(conf2); rm2.start(); rm2.getRMContext().getRMAdminService().transitionToActive(req); assertEquals("RM with ZKStore didn't start",Service.STATE.STARTED,rm2.getServiceState()); assertEquals("RM should be Active",HAServiceProtocol.HAServiceState.ACTIVE,rm2.getRMContext().getRMAdminService().getServiceStatus().getState()); for (int i=0; i < ZK_TIMEOUT_MS / 50; i++) { if (HAServiceProtocol.HAServiceState.ACTIVE == rm1.getRMContext().getRMAdminService().getServiceStatus().getState()) { Thread.sleep(100); } } assertEquals("RM should have been fenced",HAServiceProtocol.HAServiceState.STANDBY,rm1.getRMContext().getRMAdminService().getServiceStatus().getState()); assertEquals("RM should be Active",HAServiceProtocol.HAServiceState.ACTIVE,rm2.getRMContext().getRMAdminService().getServiceStatus().getState()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.recovery.TestZKRMStateStoreZKClientConnections

    InternalCallVerifier EqualityVerifier 
    @Test public void testZKRetryInterval() throws Exception { TestZKClient zkClientTester=new TestZKClient(); YarnConfiguration conf=new YarnConfiguration(); ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf); assertEquals(YarnConfiguration.DEFAULT_RM_ZK_RETRY_INTERVAL_MS,store.zkRetryInterval); store.stop(); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf); assertEquals(YarnConfiguration.DEFAULT_RM_ZK_TIMEOUT_MS / YarnConfiguration.DEFAULT_ZK_RM_NUM_RETRIES,store.zkRetryInterval); store.stop(); }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=20000) public void testZKSessionTimeout() throws Exception { TestZKClient zkClientTester=new TestZKClient(); String path="/test"; YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS); ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf); TestDispatcher dispatcher=new TestDispatcher(); store.setRMDispatcher(dispatcher); zkClientTester.forExpire=true; store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); store.getDataWithRetries(path,true); store.setDataWithRetries(path,"bytes".getBytes(),0); zkClientTester.syncBarrier.await(); try { byte[] ret=store.getDataWithRetries(path,false); assertEquals("bytes",new String(ret)); } catch ( Exception e) { String error="New session creation failed"; LOG.error(error,e); fail(error); } }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=20000) public void testZKClientDisconnectAndReconnect() throws Exception { TestZKClient zkClientTester=new TestZKClient(); String path="/test"; YarnConfiguration conf=new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS,ZK_TIMEOUT_MS); ZKRMStateStore store=(ZKRMStateStore)zkClientTester.getRMStateStore(conf); TestDispatcher dispatcher=new TestDispatcher(); store.setRMDispatcher(dispatcher); store.createWithRetries(path,null,ZooDefs.Ids.OPEN_ACL_UNSAFE,CreateMode.PERSISTENT); store.getDataWithRetries(path,true); store.setDataWithRetries(path,"newBytes".getBytes(),0); stopServer(); zkClientTester.watcher.waitForDisconnected(ZK_OP_WAIT_TIME); try { store.getDataWithRetries(path,true); fail("Expected ZKClient time out exception"); } catch ( Exception e) { assertTrue(e.getMessage().contains("Wait for ZKClient creation timed out")); } startServer(); zkClientTester.watcher.waitForConnected(ZK_OP_WAIT_TIME); byte[] ret=null; try { ret=store.getDataWithRetries(path,true); } catch ( Exception e) { String error="ZKRMStateStore Session restore failed"; LOG.error(error,e); fail(error); } assertEquals("newBytes",new String(ret)); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.resource.TestResourceWeights

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=3000) public void testWeights(){ ResourceWeights rw1=new ResourceWeights(); Assert.assertEquals("Default CPU weight should be 0.0f.",0.0f,rw1.getWeight(ResourceType.CPU),0.00001f); Assert.assertEquals("Default memory weight should be 0.0f",0.0f,rw1.getWeight(ResourceType.MEMORY),0.00001f); ResourceWeights rw2=new ResourceWeights(2.0f); Assert.assertEquals("The CPU weight should be 2.0f.",2.0f,rw2.getWeight(ResourceType.CPU),0.00001f); Assert.assertEquals("The memory weight should be 2.0f",2.0f,rw2.getWeight(ResourceType.MEMORY),0.00001f); ResourceWeights rw3=new ResourceWeights(1.5f,2.0f); Assert.assertEquals("The CPU weight should be 2.0f",2.0f,rw3.getWeight(ResourceType.CPU),0.00001f); Assert.assertEquals("The memory weight should be 1.5f",1.5f,rw3.getWeight(ResourceType.MEMORY),0.00001f); rw3.setWeight(ResourceType.CPU,2.5f); Assert.assertEquals("The CPU weight should be set to 2.5f.",2.5f,rw3.getWeight(ResourceType.CPU),0.00001f); rw3.setWeight(ResourceType.MEMORY,4.0f); Assert.assertEquals("The memory weight should be set to 4.0f.",4.0f,rw3.getWeight(ResourceType.MEMORY),0.00001f); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.resource.TestResources

    EqualityVerifier 
    @Test(timeout=1000) public void testComponentwiseMin(){ assertEquals(createResource(1,1),componentwiseMin(createResource(1,1),createResource(2,2))); assertEquals(createResource(1,1),componentwiseMin(createResource(2,2),createResource(1,1))); assertEquals(createResource(1,1),componentwiseMin(createResource(1,2),createResource(2,1))); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.TestNMExpiry

    EqualityVerifier 
    @Test public void testNMExpiry() throws Exception { String hostname1="localhost1"; String hostname2="localhost2"; String hostname3="localhost3"; Resource capability=BuilderUtils.newResource(1024,1); RegisterNodeManagerRequest request1=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1=NodeId.newInstance(hostname1,0); request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); resourceTrackerService.registerNodeManager(request1); RegisterNodeManagerRequest request2=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId2=NodeId.newInstance(hostname2,0); request2.setNodeId(nodeId2); request2.setHttpPort(0); request2.setResource(capability); resourceTrackerService.registerNodeManager(request2); int waitCount=0; while (ClusterMetrics.getMetrics().getNumLostNMs() != 2 && waitCount++ < 20) { synchronized (this) { wait(100); } } Assert.assertEquals(2,ClusterMetrics.getMetrics().getNumLostNMs()); request3=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId3=NodeId.newInstance(hostname3,0); request3.setNodeId(nodeId3); request3.setHttpPort(0); request3.setResource(capability); resourceTrackerService.registerNodeManager(request3); stopT=false; new ThirdNodeHeartBeatThread().start(); Assert.assertEquals(2,ClusterMetrics.getMetrics().getNumLostNMs()); stopT=true; }

    Class: org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.TestNMReconnect

    EqualityVerifier 
    @Test public void testReconnect() throws Exception { String hostname1="localhost1"; Resource capability=BuilderUtils.newResource(1024,1); RegisterNodeManagerRequest request1=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); NodeId nodeId1=NodeId.newInstance(hostname1,0); request1.setNodeId(nodeId1); request1.setHttpPort(0); request1.setResource(capability); resourceTrackerService.registerNodeManager(request1); Assert.assertEquals(RMNodeEventType.STARTED,rmNodeEvent.getType()); rmNodeEvent=null; resourceTrackerService.registerNodeManager(request1); Assert.assertEquals(RMNodeEventType.RECONNECTED,rmNodeEvent.getType()); rmNodeEvent=null; resourceTrackerService.registerNodeManager(request1); capability=BuilderUtils.newResource(1024,2); request1.setResource(capability); Assert.assertEquals(RMNodeEventType.RECONNECTED,rmNodeEvent.getType()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.TestRMNMRPCResponseId

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testRPCResponseId() throws IOException, YarnException { String node="localhost"; Resource capability=BuilderUtils.newResource(1024,1); RegisterNodeManagerRequest request=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); nodeId=NodeId.newInstance(node,1234); request.setNodeId(nodeId); request.setHttpPort(0); request.setResource(capability); RegisterNodeManagerRequest request1=recordFactory.newRecordInstance(RegisterNodeManagerRequest.class); request1.setNodeId(nodeId); request1.setHttpPort(0); request1.setResource(capability); resourceTrackerService.registerNodeManager(request1); org.apache.hadoop.yarn.server.api.records.NodeStatus nodeStatus=recordFactory.newRecordInstance(org.apache.hadoop.yarn.server.api.records.NodeStatus.class); nodeStatus.setNodeId(nodeId); NodeHealthStatus nodeHealthStatus=recordFactory.newRecordInstance(NodeHealthStatus.class); nodeHealthStatus.setIsNodeHealthy(true); nodeStatus.setNodeHealthStatus(nodeHealthStatus); NodeHeartbeatRequest nodeHeartBeatRequest=recordFactory.newRecordInstance(NodeHeartbeatRequest.class); nodeHeartBeatRequest.setNodeStatus(nodeStatus); nodeStatus.setResponseId(0); NodeHeartbeatResponse response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest); Assert.assertTrue(response.getResponseId() == 1); nodeStatus.setResponseId(response.getResponseId()); response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest); Assert.assertTrue(response.getResponseId() == 2); response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest); Assert.assertTrue(response.getResponseId() == 2); nodeStatus.setResponseId(0); response=resourceTrackerService.nodeHeartbeat(nodeHeartBeatRequest); Assert.assertTrue(NodeAction.RESYNC.equals(response.getNodeAction())); Assert.assertEquals("Too far behind rm response id:2 nm response id:0",response.getDiagnosticsMessage()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.rmapp.TestRMAppTransitions

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testAppRunningFailed() throws IOException { LOG.info("--- START: testAppRunningFailed ---"); RMApp application=testCreateAppRunning(null); RMAppAttempt appAttempt=application.getCurrentAppAttempt(); int expectedAttemptId=1; Assert.assertEquals(expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId()); Assert.assertTrue(maxAppAttempts > 1); for (int i=1; i < maxAppAttempts; i++) { RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false); application.handle(event); rmDispatcher.await(); assertAppState(RMAppState.ACCEPTED,application); appAttempt=application.getCurrentAppAttempt(); Assert.assertEquals(++expectedAttemptId,appAttempt.getAppAttemptId().getAttemptId()); event=new RMAppEvent(application.getApplicationId(),RMAppEventType.APP_ACCEPTED); application.handle(event); rmDispatcher.await(); assertAppState(RMAppState.ACCEPTED,application); event=new RMAppEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_REGISTERED); application.handle(event); rmDispatcher.await(); assertAppState(RMAppState.RUNNING,application); } RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false); application.handle(event); rmDispatcher.await(); sendAppUpdateSavedEvent(application); assertFailed(application,".*Failing the application.*"); assertAppFinalStateSaved(application); event=new RMAppEvent(application.getApplicationId(),RMAppEventType.KILL); application.handle(event); rmDispatcher.await(); assertFailed(application,".*Failing the application.*"); assertAppFinalStateSaved(application); verifyApplicationFinished(RMAppState.FAILED); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetAppReport(){ RMApp app=createNewTestApp(null); assertAppState(RMAppState.NEW,app); ApplicationReport report=app.createAndGetApplicationReport(null,true); Assert.assertNotNull(report.getApplicationResourceUsageReport()); Assert.assertEquals(report.getApplicationResourceUsageReport(),RMServerUtils.DUMMY_APPLICATION_RESOURCE_USAGE_REPORT); report=app.createAndGetApplicationReport("clientuser",true); Assert.assertNotNull(report.getApplicationResourceUsageReport()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testUnmanagedApp() throws IOException { ApplicationSubmissionContext subContext=new ApplicationSubmissionContextPBImpl(); subContext.setUnmanagedAM(true); LOG.info("--- START: testUnmanagedAppSuccessPath ---"); final String diagMsg="some diagnostics"; RMApp application=testCreateAppFinished(subContext,diagMsg); Assert.assertTrue("Finished app missing diagnostics",application.getDiagnostics().indexOf(diagMsg) != -1); reset(writer); LOG.info("--- START: testUnmanagedAppFailPath ---"); application=testCreateAppRunning(subContext); RMAppEvent event=new RMAppFailedAttemptEvent(application.getApplicationId(),RMAppEventType.ATTEMPT_FAILED,"",false); application.handle(event); rmDispatcher.await(); RMAppAttempt appAttempt=application.getCurrentAppAttempt(); Assert.assertEquals(1,appAttempt.getAppAttemptId().getAttemptId()); sendAppUpdateSavedEvent(application); assertFailed(application,".*Unmanaged application.*Failing the application.*"); assertAppFinalStateSaved(application); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testAppFinishedFinished() throws IOException { LOG.info("--- START: testAppFinishedFinished ---"); RMApp application=testCreateAppFinished(null,""); RMAppEvent event=new RMAppEvent(application.getApplicationId(),RMAppEventType.KILL); application.handle(event); rmDispatcher.await(); assertTimesAtFinish(application); assertAppState(RMAppState.FINISHED,application); StringBuilder diag=application.getDiagnostics(); Assert.assertEquals("application diagnostics is not correct","",diag.toString()); verifyApplicationFinished(RMAppState.FINISHED); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.TestRMAppAttemptTransitions

    InternalCallVerifier EqualityVerifier 
    @Test public void testUnmanagedAMUnexpectedRegistration(){ unmanagedAM=true; when(submissionContext.getUnmanagedAM()).thenReturn(true); submitApplicationAttempt(); assertEquals(RMAppAttemptState.SUBMITTED,applicationAttempt.getAppAttemptState()); applicationAttempt.handle(new RMAppAttemptRegistrationEvent(applicationAttempt.getAppAttemptId(),"host",8042,"oldtrackingurl")); assertEquals(YarnApplicationAttemptState.SUBMITTED,applicationAttempt.createApplicationAttemptState()); testAppAttemptSubmittedToFailedState("Unmanaged AM must register after AM attempt reaches LAUNCHED state."); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testSubmittedToKilled(){ submitApplicationAttempt(); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL)); assertEquals(YarnApplicationAttemptState.SUBMITTED,applicationAttempt.createApplicationAttemptState()); testAppAttemptKilledState(null,EMPTY_DIAGNOSTICS); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testRunningToFailed(){ Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); String containerDiagMsg="some error"; int exitCode=123; ContainerStatus cs=BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,containerDiagMsg,exitCode); ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId(); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0))); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertEquals(0,applicationAttempt.getJustFinishedContainers().size()); assertEquals(amContainer,applicationAttempt.getMasterContainer()); assertEquals(0,application.getRanNodes().size()); String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl()); verifyAMHostAndPortInvalidated(); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); }

    InternalCallVerifier AssumptionSetter NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetClientToken() throws Exception { assumeTrue(isSecurityEnabled); Container amContainer=allocateApplicationAttempt(); Token token=applicationAttempt.createClientToken(null); Assert.assertNull(token); token=applicationAttempt.createClientToken("clientuser"); Assert.assertNull(token); launchApplicationAttempt(amContainer); token=applicationAttempt.createClientToken(null); Assert.assertNull(token); token=applicationAttempt.createClientToken("clientuser"); Assert.assertNotNull(token); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL)); assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); token=applicationAttempt.createClientToken(null); Assert.assertNull(token); token=applicationAttempt.createClientToken("clientuser"); Assert.assertNull(token); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testContainersCleanupForLastAttempt(){ applicationAttempt=new RMAppAttemptImpl(applicationAttempt.getAppAttemptId(),rmContext,scheduler,masterService,submissionContext,new Configuration(),true); when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true); when(submissionContext.getMaxAppAttempts()).thenReturn(1); Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123); ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId(); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1)); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertFalse(transferStateFromPreviousAttempt); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); }

    InternalCallVerifier EqualityVerifier 
    /**
     * An AM container reported lost while the attempt is still SCHEDULED must
     * drive the attempt through final saving into the FAILED state.
     */
    @Test
    public void testAMCrashAtScheduled() {
        scheduleApplicationAttempt();
        ApplicationAttemptId attemptId = applicationAttempt.getAppAttemptId();
        ContainerStatus lostStatus = SchedulerUtils.createAbnormalContainerStatus(
            BuilderUtils.newContainerId(attemptId, 1), SchedulerUtils.LOST_CONTAINER);
        applicationAttempt.handle(
            new RMAppAttemptContainerFinishedEvent(attemptId, lostStatus));
        // The state captured for the store still reflects SCHEDULED.
        assertEquals(YarnApplicationAttemptState.SCHEDULED,
            applicationAttempt.createApplicationAttemptState());
        sendAttemptUpdateSavedEvent(applicationAttempt);
        assertEquals(RMAppAttemptState.FAILED, applicationAttempt.getAppAttemptState());
        verifyApplicationAttemptFinished(RMAppAttemptState.FAILED);
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * KILL while RUNNING: the attempt moves to FINAL_SAVING, ignores
     * intervening CONTAINER_FINISHED and EXPIRE events (remaining in
     * FINAL_SAVING with a saved state of RUNNING), and only transitions to
     * KILLED after the state-store update arrives. Afterwards the tracking
     * URLs point at the RM app page, the AM host/port are invalidated, and
     * one token-count/finish notification is verified.
     */
    @Test public void testRunningToKilled(){ Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.KILL)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0))); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.KILLED,applicationAttempt.getAppAttemptState()); assertEquals(0,applicationAttempt.getJustFinishedContainers().size()); assertEquals(amContainer,applicationAttempt.getMasterContainer()); assertEquals(0,application.getRanNodes().size()); String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl()); verifyTokenCount(applicationAttempt.getAppAttemptId(),1); verifyAMHostAndPortInvalidated(); verifyApplicationAttemptFinished(RMAppAttemptState.KILLED); }

    InternalCallVerifier EqualityVerifier 
    /**
     * A KILL event delivered in the SCHEDULED state records SCHEDULED as the
     * state to persist and ends in the killed state with no AM container.
     */
    @Test
    public void testScheduledToKilled() {
        scheduleApplicationAttempt();
        applicationAttempt.handle(new RMAppAttemptEvent(
            applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL));
        assertEquals(YarnApplicationAttemptState.SCHEDULED,
            applicationAttempt.createApplicationAttemptState());
        // null container: nothing was allocated before the kill.
        testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A KILL event delivered after allocation records ALLOCATED as the state
     * to persist and ends in the killed state, keeping the AM container.
     */
    @Test
    public void testAllocatedToKilled() {
        Container masterContainer = allocateApplicationAttempt();
        applicationAttempt.handle(new RMAppAttemptEvent(
            applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL));
        assertEquals(YarnApplicationAttemptState.ALLOCATED,
            applicationAttempt.createApplicationAttemptState());
        testAppAttemptKilledState(masterContainer, EMPTY_DIAGNOSTICS);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With keep-containers-across-attempts enabled and more attempts allowed,
     * an AM container failure ends this attempt FAILED with
     * transferStateFromPreviousAttempt=true; container-finished events that
     * arrive after the attempt has failed are still accumulated in the
     * just-finished list (so the next attempt can pull them).
     */
    @Test public void testFailedToFailed(){ when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true); Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); ContainerStatus cs1=ContainerStatus.newInstance(amContainer.getId(),ContainerState.COMPLETE,"some error",123); ApplicationAttemptId appAttemptId=applicationAttempt.getAppAttemptId(); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs1)); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertTrue(transferStateFromPreviousAttempt); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); assertEquals(0,applicationAttempt.getJustFinishedContainers().size()); ContainerStatus cs2=ContainerStatus.newInstance(ContainerId.newInstance(appAttemptId,2),ContainerState.COMPLETE,"",0); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(appAttemptId,cs2)); assertEquals(1,applicationAttempt.getJustFinishedContainers().size()); assertEquals(cs2.getContainerId(),applicationAttempt.getJustFinishedContainers().get(0).getContainerId()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * After the AM unregisters (attempt in FINAL_SAVING, saved state RUNNING),
     * an EXPIRE event must not disturb the transition: the attempt stays in
     * FINAL_SAVING and finishes normally once the state-store update arrives,
     * preserving the unregistration's final status, tracking URL and
     * diagnostics.
     */
    @Test public void testFinalSavingToFinishedWithExpire(){ Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); FinalApplicationStatus finalStatus=FinalApplicationStatus.SUCCEEDED; String trackingUrl="mytrackingurl"; String diagnostics="Successssseeeful"; applicationAttempt.handle(new RMAppAttemptUnregistrationEvent(applicationAttempt.getAppAttemptId(),trackingUrl,finalStatus,diagnostics)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); testAppAttemptFinishedState(amContainer,finalStatus,trackingUrl,diagnostics,0,false); }

    InternalCallVerifier EqualityVerifier 
    /**
     * A launch failure after allocation records ALLOCATED as the state to
     * persist and ends in the failed state carrying the launch diagnostics.
     */
    @Test
    public void testAllocatedToFailed() {
        Container masterContainer = allocateApplicationAttempt();
        String diagnostics = "Launch Failed";
        applicationAttempt.handle(new RMAppAttemptLaunchFailedEvent(
            applicationAttempt.getAppAttemptId(), diagnostics));
        assertEquals(YarnApplicationAttemptState.ALLOCATED,
            applicationAttempt.createApplicationAttemptState());
        testAppAttemptFailedState(masterContainer, diagnostics);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A KILL event in the NEW state records NEW as the state to persist, ends
     * in the killed state with no AM container, and triggers exactly one
     * token-count notification.
     */
    @Test
    public void testNewToKilled() {
        applicationAttempt.handle(new RMAppAttemptEvent(
            applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL));
        assertEquals(YarnApplicationAttemptState.NEW,
            applicationAttempt.createApplicationAttemptState());
        testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS);
        verifyTokenCount(applicationAttempt.getAppAttemptId(), 1);
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * An EXPIRE in the LAUNCHED state (AM never registered) fails the attempt
     * after the state-store update: diagnostics mention "timed out", both
     * tracking URLs fall back to the RM app page, and token-count/finish
     * notifications are verified.
     */
    @Test(timeout=10000) public void testLaunchedExpire(){ Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE)); assertEquals(YarnApplicationAttemptState.LAUNCHED,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out")); String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl()); verifyTokenCount(applicationAttempt.getAppAttemptId(),1); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); }

    InternalCallVerifier EqualityVerifier 
    /**
     * An AM container that completes abnormally (exit code 123) while the
     * attempt is ALLOCATED fails the attempt after the state-store update;
     * the crash diagnostics are then checked for the exit code (URL check is
     * conditional on a tracking URL being present).
     */
    @Test public void testAMCrashAtAllocated(){ Container amContainer=allocateApplicationAttempt(); String containerDiagMsg="some error"; int exitCode=123; ContainerStatus cs=BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,containerDiagMsg,exitCode); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),cs)); assertEquals(YarnApplicationAttemptState.ALLOCATED,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); verifyTokenCount(applicationAttempt.getAppAttemptId(),1); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); boolean shouldCheckURL=(applicationAttempt.getTrackingUrl() != null); verifyAMCrashAtAllocatedDiagnosticInfo(applicationAttempt.getDiagnostics(),exitCode,shouldCheckURL); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * An EXPIRE while RUNNING fails the attempt after the state-store update:
     * diagnostics mention "timed out", both tracking URLs fall back to the RM
     * app page, the AM host/port are invalidated, and token-count/finish
     * notifications are verified.
     */
    @Test(timeout=20000) public void testRunningExpire(){ Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); applicationAttempt.handle(new RMAppAttemptEvent(applicationAttempt.getAppAttemptId(),RMAppAttemptEventType.EXPIRE)); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertEquals(RMAppAttemptState.FAILED,applicationAttempt.getAppAttemptState()); assertTrue("expire diagnostics missing",applicationAttempt.getDiagnostics().contains("timed out")); String rmAppPageUrl=pjoin(RM_WEBAPP_ADDR,"cluster","app",applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl,applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl,applicationAttempt.getTrackingUrl()); verifyTokenCount(applicationAttempt.getAppAttemptId(),1); verifyAMHostAndPortInvalidated(); verifyApplicationAttemptFinished(RMAppAttemptState.FAILED); }

    InternalCallVerifier EqualityVerifier 
    /**
     * After the AM unregisters (attempt in FINAL_SAVING, saved state RUNNING),
     * a CONTAINER_FINISHED for the AM container must not disturb the
     * transition: the attempt stays in FINAL_SAVING and finishes normally
     * once the state-store update arrives, preserving the unregistration's
     * final status, tracking URL and diagnostics.
     */
    @Test public void testFinalSavingToFinishedWithContainerFinished(){ Container amContainer=allocateApplicationAttempt(); launchApplicationAttempt(amContainer); runApplicationAttempt(amContainer,"host",8042,"oldtrackingurl",false); FinalApplicationStatus finalStatus=FinalApplicationStatus.SUCCEEDED; String trackingUrl="mytrackingurl"; String diagnostics="Successful"; applicationAttempt.handle(new RMAppAttemptUnregistrationEvent(applicationAttempt.getAppAttemptId(),trackingUrl,finalStatus,diagnostics)); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); assertEquals(YarnApplicationAttemptState.RUNNING,applicationAttempt.createApplicationAttemptState()); applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent(applicationAttempt.getAppAttemptId(),BuilderUtils.newContainerStatus(amContainer.getId(),ContainerState.COMPLETE,"",0))); assertEquals(RMAppAttemptState.FINAL_SAVING,applicationAttempt.getAppAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); testAppAttemptFinishedState(amContainer,finalStatus,trackingUrl,diagnostics,0,false); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * For an unmanaged AM, registering straight from SUBMITTED (saved state
     * SUBMITTED) must not transfer container state from a previous attempt,
     * even with keep-containers-across-attempts enabled.
     */
    @Test public void testUnmanagedAMContainersCleanup(){ unmanagedAM=true; when(submissionContext.getUnmanagedAM()).thenReturn(true); when(submissionContext.getKeepContainersAcrossApplicationAttempts()).thenReturn(true); submitApplicationAttempt(); applicationAttempt.handle(new RMAppAttemptRegistrationEvent(applicationAttempt.getAppAttemptId(),"host",8042,"oldtrackingurl")); assertEquals(YarnApplicationAttemptState.SUBMITTED,applicationAttempt.createApplicationAttemptState()); sendAttemptUpdateSavedEvent(applicationAttempt); assertFalse(transferStateFromPreviousAttempt); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.TestRMContainerImpl

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Walks an RMContainerImpl through NEW -> ALLOCATED -> ACQUIRED -> RUNNING
     * using a DrainDispatcher, then delivers a RELEASED event: the container
     * moves to RELEASED with ABORTED exit status and COMPLETE container state,
     * the history writer records the finish, and the app-attempt handler
     * receives a CONTAINER_FINISHED event carrying the released status
     * (captured via ArgumentCaptor). A subsequent FINISHED event is a no-op —
     * the container stays RELEASED.
     */
    @Test public void testReleaseWhileRunning(){ DrainDispatcher drainDispatcher=new DrainDispatcher(); EventHandler appAttemptEventHandler=mock(EventHandler.class); EventHandler generic=mock(EventHandler.class); drainDispatcher.register(RMAppAttemptEventType.class,appAttemptEventHandler); drainDispatcher.register(RMNodeEventType.class,generic); drainDispatcher.init(new YarnConfiguration()); drainDispatcher.start(); NodeId nodeId=BuilderUtils.newNodeId("host",3425); ApplicationId appId=BuilderUtils.newApplicationId(1,1); ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1); ContainerId containerId=BuilderUtils.newContainerId(appAttemptId,1); ContainerAllocationExpirer expirer=mock(ContainerAllocationExpirer.class); Resource resource=BuilderUtils.newResource(512,1); Priority priority=BuilderUtils.newPriority(5); Container container=BuilderUtils.newContainer(containerId,nodeId,"host:3465",resource,priority,null); RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class); RMContext rmContext=mock(RMContext.class); when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); RMContainer rmContainer=new RMContainerImpl(container,appAttemptId,nodeId,"user",rmContext); assertEquals(RMContainerState.NEW,rmContainer.getState()); assertEquals(resource,rmContainer.getAllocatedResource()); assertEquals(nodeId,rmContainer.getAllocatedNode()); assertEquals(priority,rmContainer.getAllocatedPriority()); verify(writer).containerStarted(any(RMContainer.class)); rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.START)); drainDispatcher.await(); assertEquals(RMContainerState.ALLOCATED,rmContainer.getState()); rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.ACQUIRED)); drainDispatcher.await(); assertEquals(RMContainerState.ACQUIRED,rmContainer.getState()); 
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.LAUNCHED)); drainDispatcher.await(); assertEquals(RMContainerState.RUNNING,rmContainer.getState()); assertEquals("//host:3465/node/containerlogs/container_1_0001_01_000001/user",rmContainer.getLogURL()); reset(appAttemptEventHandler); ContainerStatus containerStatus=SchedulerUtils.createAbnormalContainerStatus(containerId,SchedulerUtils.RELEASED_CONTAINER); rmContainer.handle(new RMContainerFinishedEvent(containerId,containerStatus,RMContainerEventType.RELEASED)); drainDispatcher.await(); assertEquals(RMContainerState.RELEASED,rmContainer.getState()); assertEquals(SchedulerUtils.RELEASED_CONTAINER,rmContainer.getDiagnosticsInfo()); assertEquals(ContainerExitStatus.ABORTED,rmContainer.getContainerExitStatus()); assertEquals(ContainerState.COMPLETE,rmContainer.getContainerState()); verify(writer).containerFinished(any(RMContainer.class)); ArgumentCaptor captor=ArgumentCaptor.forClass(RMAppAttemptContainerFinishedEvent.class); verify(appAttemptEventHandler).handle(captor.capture()); RMAppAttemptContainerFinishedEvent cfEvent=captor.getValue(); assertEquals(appAttemptId,cfEvent.getApplicationAttemptId()); assertEquals(containerStatus,cfEvent.getContainerStatus()); assertEquals(RMAppAttemptEventType.CONTAINER_FINISHED,cfEvent.getType()); rmContainer.handle(new RMContainerFinishedEvent(containerId,SchedulerUtils.createAbnormalContainerStatus(containerId,"FinishedContainer"),RMContainerEventType.FINISHED)); assertEquals(RMContainerState.RELEASED,rmContainer.getState()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Walks an RMContainerImpl through NEW -> ALLOCATED -> ACQUIRED -> RUNNING
     * using a DrainDispatcher, then delivers an EXPIRE event: a running
     * container must ignore expiry — it remains RUNNING and the history
     * writer never records a finish.
     */
    @Test public void testExpireWhileRunning(){ DrainDispatcher drainDispatcher=new DrainDispatcher(); EventHandler appAttemptEventHandler=mock(EventHandler.class); EventHandler generic=mock(EventHandler.class); drainDispatcher.register(RMAppAttemptEventType.class,appAttemptEventHandler); drainDispatcher.register(RMNodeEventType.class,generic); drainDispatcher.init(new YarnConfiguration()); drainDispatcher.start(); NodeId nodeId=BuilderUtils.newNodeId("host",3425); ApplicationId appId=BuilderUtils.newApplicationId(1,1); ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1); ContainerId containerId=BuilderUtils.newContainerId(appAttemptId,1); ContainerAllocationExpirer expirer=mock(ContainerAllocationExpirer.class); Resource resource=BuilderUtils.newResource(512,1); Priority priority=BuilderUtils.newPriority(5); Container container=BuilderUtils.newContainer(containerId,nodeId,"host:3465",resource,priority,null); RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class); RMContext rmContext=mock(RMContext.class); when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); RMContainer rmContainer=new RMContainerImpl(container,appAttemptId,nodeId,"user",rmContext); assertEquals(RMContainerState.NEW,rmContainer.getState()); assertEquals(resource,rmContainer.getAllocatedResource()); assertEquals(nodeId,rmContainer.getAllocatedNode()); assertEquals(priority,rmContainer.getAllocatedPriority()); verify(writer).containerStarted(any(RMContainer.class)); rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.START)); drainDispatcher.await(); assertEquals(RMContainerState.ALLOCATED,rmContainer.getState()); rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.ACQUIRED)); drainDispatcher.await(); assertEquals(RMContainerState.ACQUIRED,rmContainer.getState()); 
rmContainer.handle(new RMContainerEvent(containerId,RMContainerEventType.LAUNCHED)); drainDispatcher.await(); assertEquals(RMContainerState.RUNNING,rmContainer.getState()); assertEquals("//host:3465/node/containerlogs/container_1_0001_01_000001/user",rmContainer.getLogURL()); reset(appAttemptEventHandler); ContainerStatus containerStatus=SchedulerUtils.createAbnormalContainerStatus(containerId,SchedulerUtils.EXPIRED_CONTAINER); rmContainer.handle(new RMContainerFinishedEvent(containerId,containerStatus,RMContainerEventType.EXPIRE)); drainDispatcher.await(); assertEquals(RMContainerState.RUNNING,rmContainer.getState()); verify(writer,never()).containerFinished(any(RMContainer.class)); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerApplicationAttempt

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Moving an attempt between sibling queues must transfer all of its
     * metrics: builds an attempt in "old" (under "parent") with one live
     * container (1536MB/2 vcores), one reserved container (2048MB/3 vcores)
     * and a pending request, then checks that after move(newQueue) the used/
     * reserved/pending numbers drain from "old" into "new" while "parent"
     * stays unchanged. Also asserts the epoch (3) is encoded in the new
     * container id (0x00c00001 = 3 << 22 | 1).
     */
    @Test public void testMove(){ final String user="user1"; Queue parentQueue=createQueue("parent",null); Queue oldQueue=createQueue("old",parentQueue); Queue newQueue=createQueue("new",parentQueue); QueueMetrics parentMetrics=parentQueue.getMetrics(); QueueMetrics oldMetrics=oldQueue.getMetrics(); QueueMetrics newMetrics=newQueue.getMetrics(); ApplicationAttemptId appAttId=createAppAttemptId(0,0); RMContext rmContext=mock(RMContext.class); when(rmContext.getEpoch()).thenReturn(3); SchedulerApplicationAttempt app=new SchedulerApplicationAttempt(appAttId,user,oldQueue,oldQueue.getActiveUsersManager(),rmContext); oldMetrics.submitApp(user); assertEquals(app.getNewContainerId(),0x00c00001); Resource requestedResource=Resource.newInstance(1536,2); Priority requestedPriority=Priority.newInstance(2); ResourceRequest request=ResourceRequest.newInstance(requestedPriority,ResourceRequest.ANY,requestedResource,3); app.updateResourceRequests(Arrays.asList(request)); RMContainer container1=createRMContainer(appAttId,1,requestedResource); app.liveContainers.put(container1.getContainerId(),container1); SchedulerNode node=createNode(); app.appSchedulingInfo.allocate(NodeType.OFF_SWITCH,node,requestedPriority,request,container1.getContainer()); Priority prio1=Priority.newInstance(1); Resource reservedResource=Resource.newInstance(2048,3); RMContainer container2=createReservedRMContainer(appAttId,1,reservedResource,node.getNodeID(),prio1); Map reservations=new HashMap(); reservations.put(node.getNodeID(),container2); app.reservedContainers.put(prio1,reservations); oldMetrics.reserveResource(user,reservedResource); checkQueueMetrics(oldMetrics,1,1,1536,2,2048,3,3072,4); checkQueueMetrics(newMetrics,0,0,0,0,0,0,0,0); checkQueueMetrics(parentMetrics,1,1,1536,2,2048,3,3072,4); app.move(newQueue); checkQueueMetrics(oldMetrics,0,0,0,0,0,0,0,0); checkQueueMetrics(newMetrics,1,1,1536,2,2048,3,3072,4); checkQueueMetrics(parentMetrics,1,1,1536,2,2048,3,3072,4); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises SchedulerUtils.normalizeRequest with the
     * DefaultResourceCalculator: negative and zero asks round up to the
     * minimum (1024MB); asks between multiples round up to the next multiple
     * of the minimum; asks at or just below the maximum stay capped at the
     * maximum; lowering the maximum clamps larger asks down to it.
     */
    @Test(timeout=30000) public void testNormalizeRequest(){ ResourceCalculator resourceCalculator=new DefaultResourceCalculator(); final int minMemory=1024; final int maxMemory=8192; Resource minResource=Resources.createResource(minMemory,0); Resource maxResource=Resources.createResource(maxMemory,0); ResourceRequest ask=new ResourceRequestPBImpl(); ask.setCapability(Resources.createResource(-1024)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(minMemory,ask.getCapability().getMemory()); ask.setCapability(Resources.createResource(0)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(minMemory,ask.getCapability().getMemory()); ask.setCapability(Resources.createResource(2 * minMemory)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(2 * minMemory,ask.getCapability().getMemory()); ask.setCapability(Resources.createResource(minMemory + 10)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(2 * minMemory,ask.getCapability().getMemory()); ask.setCapability(Resources.createResource(maxMemory)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(maxMemory,ask.getCapability().getMemory()); ask.setCapability(Resources.createResource(maxMemory - 10)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(maxMemory,ask.getCapability().getMemory()); maxResource=Resources.createResource(maxMemory - 10,0); ask.setCapability(Resources.createResource(maxMemory - 100)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(maxResource.getMemory(),ask.getCapability().getMemory()); maxResource=Resources.createResource(maxMemory,0); ask.setCapability(Resources.createResource(maxMemory + 100)); 
SchedulerUtils.normalizeRequest(ask,resourceCalculator,null,minResource,maxResource); assertEquals(maxResource.getMemory(),ask.getCapability().getMemory()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Exercises SchedulerUtils.normalizeRequest with the
     * DominantResourceCalculator: negative and zero asks round up to the
     * minimum resource (1024MB/1 vcore); a 1536MB/0-core ask normalizes both
     * dimensions, yielding 2048MB/1 vcore.
     */
    @Test(timeout=30000) public void testNormalizeRequestWithDominantResourceCalculator(){ ResourceCalculator resourceCalculator=new DominantResourceCalculator(); Resource minResource=Resources.createResource(1024,1); Resource maxResource=Resources.createResource(10240,10); Resource clusterResource=Resources.createResource(10 * 1024,10); ResourceRequest ask=new ResourceRequestPBImpl(); ask.setCapability(Resources.createResource(-1024,-1)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,clusterResource,minResource,maxResource); assertEquals(minResource,ask.getCapability()); ask.setCapability(Resources.createResource(0,0)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,clusterResource,minResource,maxResource); assertEquals(minResource,ask.getCapability()); assertEquals(1,ask.getCapability().getVirtualCores()); assertEquals(1024,ask.getCapability().getMemory()); ask.setCapability(Resources.createResource(1536,0)); SchedulerUtils.normalizeRequest(ask,resourceCalculator,clusterResource,minResource,maxResource); assertEquals(Resources.createResource(2048,1),ask.getCapability()); assertEquals(1,ask.getCapability().getVirtualCores()); assertEquals(2048,ask.getCapability().getMemory()); }

    EqualityVerifier 
    /** An abnormal container status must carry the ABORTED exit status. */
    @Test
    public void testCreateAbnormalContainerStatus() {
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        ContainerId containerId =
            ContainerId.newInstance(ApplicationAttemptId.newInstance(appId, 1), 1);
        ContainerStatus status =
            SchedulerUtils.createAbnormalContainerStatus(containerId, "x");
        Assert.assertEquals(ContainerExitStatus.ABORTED, status.getExitStatus());
    }

    EqualityVerifier 
    /** A preempted container status must carry the PREEMPTED exit status. */
    @Test
    public void testCreatePreemptedContainerStatus() {
        ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
        ContainerId containerId =
            ContainerId.newInstance(ApplicationAttemptId.newInstance(appId, 1), 1);
        ContainerStatus status =
            SchedulerUtils.createPreemptedContainerStatus(containerId, "x");
        Assert.assertEquals(ContainerExitStatus.PREEMPTED, status.getExitStatus());
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestApplicationLimits

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies CapacityScheduler limit computations for leaf queue A against
     * the configuration formulas: max active applications (per queue and per
     * user) derived from cluster memory, AM-resource percent and absolute
     * (maximum) capacity; recomputation after a cluster-resource update; the
     * default max-applications derived from the system-wide cap; and that
     * explicitly setting maximum-am-resource-percent (0.5) and
     * maximum-applications (9999) on the queue overrides the derived values
     * after the queue hierarchy is re-parsed.
     */
    @Test public void testLimitsComputation() throws Exception { CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); setupQueueConfiguration(csConf); YarnConfiguration conf=new YarnConfiguration(); CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class); when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getConf()).thenReturn(conf); when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1)); when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,16)); when(csContext.getApplicationComparator()).thenReturn(CapacityScheduler.applicationComparator); when(csContext.getQueueComparator()).thenReturn(CapacityScheduler.queueComparator); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 16); when(csContext.getClusterResource()).thenReturn(clusterResource); Map queues=new HashMap(); CSQueue root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook); LeafQueue queue=(LeafQueue)queues.get(A); LOG.info("Queue 'A' -" + " maxActiveApplications=" + queue.getMaximumActiveApplications() + " maxActiveApplicationsPerUser="+ queue.getMaximumActiveApplicationsPerUser()); int expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity())); assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications()); int expectedMaxActiveAppsUsingAbsCap=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePercent() * queue.getAbsoluteCapacity())); assertEquals((int)Math.ceil(expectedMaxActiveAppsUsingAbsCap * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),queue.getMaximumActiveApplicationsPerUser()); 
assertEquals((int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),queue.getMetrics().getAvailableMB()); clusterResource=Resources.createResource(120 * 16 * GB); root.updateClusterResource(clusterResource); expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity())); assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications()); expectedMaxActiveAppsUsingAbsCap=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePercent() * queue.getAbsoluteCapacity())); assertEquals((int)Math.ceil(expectedMaxActiveAppsUsingAbsCap * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()),queue.getMaximumActiveApplicationsPerUser()); assertEquals((int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()),queue.getMetrics().getAvailableMB()); assertEquals((int)CapacitySchedulerConfiguration.UNDEFINED,csConf.getMaximumApplicationsPerQueue(queue.getQueuePath())); int expectedMaxApps=(int)(CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_SYSTEM_APPLICATIIONS * queue.getAbsoluteCapacity()); assertEquals(expectedMaxApps,queue.getMaxApplications()); int expectedMaxAppsPerUser=(int)(expectedMaxApps * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()); assertEquals(expectedMaxAppsPerUser,queue.getMaxApplicationsPerUser()); assertEquals((long)CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT,(long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath())); csConf.setFloat("yarn.scheduler.capacity." 
+ queue.getQueuePath() + ".maximum-am-resource-percent",0.5f); queues=new HashMap(); root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook); clusterResource=Resources.createResource(100 * 16 * GB); queue=(LeafQueue)queues.get(A); expectedMaxActiveApps=Math.max(1,(int)Math.ceil(((float)clusterResource.getMemory() / (1 * GB)) * csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath()) * queue.getAbsoluteMaximumCapacity())); assertEquals((long)0.5,(long)csConf.getMaximumApplicationMasterResourcePerQueuePercent(queue.getQueuePath())); assertEquals(expectedMaxActiveApps,queue.getMaximumActiveApplications()); csConf.setInt("yarn.scheduler.capacity." + queue.getQueuePath() + ".maximum-applications",9999); queues=new HashMap(); root=CapacityScheduler.parseQueue(csContext,csConf,null,"root",queues,queues,TestUtils.spyHook); queue=(LeafQueue)queues.get(A); assertEquals(9999,(int)csConf.getMaximumApplicationsPerQueue(queue.getQueuePath())); assertEquals(9999,queue.getMaxApplications()); expectedMaxAppsPerUser=(int)(9999 * (queue.getUserLimit() / 100.0f) * queue.getUserLimitFactor()); assertEquals(expectedMaxAppsPerUser,queue.getMaxApplicationsPerUser()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the active-application admission limits on the leaf queue:
     * with the default limit, the third submission by user_0 is queued as
     * pending; finishing an active app activates a pending one; after raising
     * the queue-wide limit to 3 (mocked via getMaximumActiveApplications),
     * user_1's first app activates while a further app pends, and finishing
     * it again activates nothing beyond the per-user limit. Active/pending
     * counts are checked per queue and per user at every step.
     */
    @Test public void testActiveApplicationLimits() throws Exception { final String user_0="user_0"; final String user_1="user_1"; int APPLICATION_ID=0; FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_0,user_0); assertEquals(1,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(1,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_1,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_2,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); queue.finishApplicationAttempt(app_0,A); assertEquals(2,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_3,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); doReturn(3).when(queue).getMaximumActiveApplications(); FiCaSchedulerApp app_4=getMockApplication(APPLICATION_ID++,user_1); queue.submitApplicationAttempt(app_4,user_1); assertEquals(3,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); 
assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); assertEquals(1,queue.getNumActiveApplications(user_1)); assertEquals(0,queue.getNumPendingApplications(user_1)); FiCaSchedulerApp app_5=getMockApplication(APPLICATION_ID++,user_1); queue.submitApplicationAttempt(app_5,user_1); assertEquals(3,queue.getNumActiveApplications()); assertEquals(2,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); assertEquals(1,queue.getNumActiveApplications(user_1)); assertEquals(1,queue.getNumPendingApplications(user_1)); queue.finishApplicationAttempt(app_4,A); assertEquals(3,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); assertEquals(1,queue.getNumActiveApplications(user_1)); assertEquals(0,queue.getNumPendingApplications(user_1)); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that finishing (e.g. killing) attempts keeps the LeafQueue's
     * active/pending counters and the activeApplications/pendingApplications
     * sets consistent.  With the max-active limit pinned to 2 via doReturn:
     * killing a pending attempt (app_2) removes it without activating others;
     * killing an active attempt (app_0) promotes the oldest pending attempt
     * (app_3) from pending to active; draining the rest returns all counters
     * to zero.
     */
    @Test public void testActiveLimitsWithKilledApps() throws Exception { final String user_0="user_0"; int APPLICATION_ID=0; doReturn(2).when(queue).getMaximumActiveApplications(); FiCaSchedulerApp app_0=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_0,user_0); assertEquals(1,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(1,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertTrue(queue.activeApplications.contains(app_0)); FiCaSchedulerApp app_1=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_1,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertTrue(queue.activeApplications.contains(app_1)); FiCaSchedulerApp app_2=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_2,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); assertTrue(queue.pendingApplications.contains(app_2)); FiCaSchedulerApp app_3=getMockApplication(APPLICATION_ID++,user_0); queue.submitApplicationAttempt(app_3,user_0); assertEquals(2,queue.getNumActiveApplications()); assertEquals(2,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(2,queue.getNumPendingApplications(user_0)); assertTrue(queue.pendingApplications.contains(app_3)); queue.finishApplicationAttempt(app_2,A); assertEquals(2,queue.getNumActiveApplications()); assertEquals(1,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(1,queue.getNumPendingApplications(user_0)); 
assertFalse(queue.pendingApplications.contains(app_2)); assertFalse(queue.activeApplications.contains(app_2)); queue.finishApplicationAttempt(app_0,A); assertEquals(2,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(2,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertTrue(queue.activeApplications.contains(app_3)); assertFalse(queue.pendingApplications.contains(app_3)); assertFalse(queue.activeApplications.contains(app_0)); queue.finishApplicationAttempt(app_1,A); assertEquals(1,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(1,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertFalse(queue.activeApplications.contains(app_1)); queue.finishApplicationAttempt(app_3,A); assertEquals(0,queue.getNumActiveApplications()); assertEquals(0,queue.getNumPendingApplications()); assertEquals(0,queue.getNumActiveApplications(user_0)); assertEquals(0,queue.getNumPendingApplications(user_0)); assertFalse(queue.activeApplications.contains(app_3)); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCSQueueUtils

    InternalCallVerifier EqualityVerifier 
    @Test public void testAbsoluteMaxAvailCapacityNoUse() throws Exception { ResourceCalculator resourceCalculator=new DefaultResourceCalculator(); Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 32); YarnConfiguration conf=new YarnConfiguration(); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class); when(csContext.getConf()).thenReturn(conf); when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getClusterResource()).thenReturn(clusterResource); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1)); when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,32)); final String L1Q1="L1Q1"; csConf.setQueues(CapacitySchedulerConfiguration.ROOT,new String[]{L1Q1}); final String L1Q1P=CapacitySchedulerConfiguration.ROOT + "." + L1Q1; csConf.setCapacity(L1Q1P,90); csConf.setMaximumCapacity(L1Q1P,90); ParentQueue root=new ParentQueue(csContext,CapacitySchedulerConfiguration.ROOT,null,null); LeafQueue l1q1=new LeafQueue(csContext,L1Q1,root,null); LOG.info("t1 root " + CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,root)); LOG.info("t1 l1q1 " + CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l1q1)); assertEquals(1.0f,CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,root),0.000001f); assertEquals(0.9f,CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l1q1),0.000001f); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Absolute max-available capacity for leaf l2q2 under load, in the
     * hierarchy root -> {L1Q1 (cap 80/max 80), L1Q2 (20/100)} and
     * L1Q1 -> {L2Q1 (50/50), L2Q2 (50/50)}: it starts at 0.4 (50% of 80%)
     * and shrinks as used resources accumulate in root/L1Q2 and then
     * root/L1Q1/L2Q1, ending at 0.1.
     * NOTE(review): all four queue names are registered via setQueues on ROOT
     * even though L2Q1/L2Q2 are constructed as children of l1q1 — confirm this
     * matches the intended configuration.
     */
    @Test public void testAbsoluteMaxAvailCapacityWithUse() throws Exception { ResourceCalculator resourceCalculator=new DefaultResourceCalculator(); Resource clusterResource=Resources.createResource(100 * 16 * GB,100 * 32); YarnConfiguration conf=new YarnConfiguration(); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); CapacitySchedulerContext csContext=mock(CapacitySchedulerContext.class); when(csContext.getConf()).thenReturn(conf); when(csContext.getConfiguration()).thenReturn(csConf); when(csContext.getClusterResource()).thenReturn(clusterResource); when(csContext.getResourceCalculator()).thenReturn(resourceCalculator); when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB,1)); when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB,32)); final String L1Q1="L1Q1"; final String L1Q2="L1Q2"; final String L2Q1="L2Q1"; final String L2Q2="L2Q2"; csConf.setQueues(CapacitySchedulerConfiguration.ROOT,new String[]{L1Q1,L1Q2,L2Q1,L2Q2}); final String L1Q1P=CapacitySchedulerConfiguration.ROOT + "." + L1Q1; csConf.setCapacity(L1Q1P,80); csConf.setMaximumCapacity(L1Q1P,80); final String L1Q2P=CapacitySchedulerConfiguration.ROOT + "." + L1Q2; csConf.setCapacity(L1Q2P,20); csConf.setMaximumCapacity(L1Q2P,100); final String L2Q1P=L1Q1P + "." + L2Q1; csConf.setCapacity(L2Q1P,50); csConf.setMaximumCapacity(L2Q1P,50); final String L2Q2P=L1Q1P + "." 
+ L2Q2; csConf.setCapacity(L2Q2P,50); csConf.setMaximumCapacity(L2Q2P,50); float result; ParentQueue root=new ParentQueue(csContext,CapacitySchedulerConfiguration.ROOT,null,null); LeafQueue l1q1=new LeafQueue(csContext,L1Q1,root,null); LeafQueue l1q2=new LeafQueue(csContext,L1Q2,root,null); LeafQueue l2q2=new LeafQueue(csContext,L2Q2,l1q1,null); LeafQueue l2q1=new LeafQueue(csContext,L2Q1,l1q1,null); result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2); assertEquals(0.4f,result,0.000001f); LOG.info("t2 l2q2 " + result); Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.1f)); Resources.addTo(l1q2.getUsedResources(),Resources.multiply(clusterResource,0.1f)); result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2); assertEquals(0.4f,result,0.000001f); LOG.info("t2 l2q2 " + result); Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.3f)); Resources.addTo(l1q2.getUsedResources(),Resources.multiply(clusterResource,0.3f)); result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2); assertEquals(0.3f,result,0.000001f); LOG.info("t2 l2q2 " + result); Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.1f)); Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.1f)); result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2); assertEquals(0.3f,result,0.000001f); LOG.info("t2 l2q2 " + result); Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.2f)); Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.2f)); Resources.addTo(l2q1.getUsedResources(),Resources.multiply(clusterResource,0.2f)); result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2); assertEquals(0.3f,result,0.000001f); LOG.info("t2 l2q2 " + result); 
Resources.addTo(root.getUsedResources(),Resources.multiply(clusterResource,0.2f)); Resources.addTo(l1q1.getUsedResources(),Resources.multiply(clusterResource,0.2f)); Resources.addTo(l2q1.getUsedResources(),Resources.multiply(clusterResource,0.2f)); result=CSQueueUtils.getAbsoluteMaxAvailCapacity(resourceCalculator,clusterResource,l2q2); assertEquals(0.1f,result,0.000001f); LOG.info("t2 l2q2 " + result); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler

    InternalCallVerifier EqualityVerifier 
    /**
     * Ensures an AM's allocate() call does not block on the CapacityScheduler
     * monitor: a helper thread grabs the scheduler lock and parks on a barrier
     * while the main thread issues allocate(), which must still complete.
     *
     * Fixes in this revision:
     * - assertEquals arguments were swapped (expected value comes first), which
     *   produced misleading failure messages;
     * - raw Map / PrivilegedAction types replaced with their generic forms;
     * - the lock-holding thread re-interrupts itself instead of only printing
     *   the stack trace when interrupted.
     */
    @Test(timeout=30000)
    public void testAllocateDoesNotBlockOnSchedulerLock() throws Exception {
      final YarnConfiguration conf = new YarnConfiguration();
      conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
      MyContainerManager containerManager = new MyContainerManager();
      final MockRMWithAMS rm = new MockRMWithAMS(conf, containerManager);
      rm.start();
      MockNM nm1 = rm.registerNode("localhost:1234", 5120);

      Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(2);
      acls.put(ApplicationAccessType.VIEW_APP, "*");
      RMApp app = rm.submitApp(1024, "appname", "appuser", acls);
      nm1.nodeHeartbeat(true);

      RMAppAttempt attempt = app.getCurrentAppAttempt();
      ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
      // Poll (up to 10s, 100ms steps) for the attempt to reach LAUNCHED.
      int msecToWait = 10000;
      int msecToSleep = 100;
      while (attempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED && msecToWait > 0) {
        LOG.info("Waiting for AppAttempt to reach LAUNCHED state. "
            + "Current state is " + attempt.getAppAttemptState());
        Thread.sleep(msecToSleep);
        msecToWait -= msecToSleep;
      }
      // Expected value first — previously the arguments were reversed.
      Assert.assertEquals(RMAppAttemptState.LAUNCHED, attempt.getAppAttemptState());

      // Build an AM-side proxy authenticated with the AMRM token.
      final YarnRPC rpc = YarnRPC.create(conf);
      UserGroupInformation currentUser =
          UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
      Credentials credentials = containerManager.getContainerCredentials();
      final InetSocketAddress rmBindAddress = rm.getApplicationMasterService().getBindAddress();
      Token amRMToken =
          MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress, credentials.getAllTokens());
      currentUser.addToken(amRMToken);
      ApplicationMasterProtocol client =
          currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
            @Override
            public ApplicationMasterProtocol run() {
              return (ApplicationMasterProtocol) rpc.getProxy(
                  ApplicationMasterProtocol.class, rmBindAddress, conf);
            }
          });
      RegisterApplicationMasterRequest request =
          RegisterApplicationMasterRequest.newInstance("localhost", 12345, "");
      client.registerApplicationMaster(request);

      // Hold the scheduler monitor in another thread; allocate() must not need it.
      final CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
      final CyclicBarrier barrier = new CyclicBarrier(2);
      Thread otherThread = new Thread(new Runnable() {
        @Override
        public void run() {
          synchronized (cs) {
            try {
              barrier.await(); // signal: lock is now held
              barrier.await(); // wait until allocate() has completed
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt(); // preserve interrupt status
              e.printStackTrace();
            } catch (BrokenBarrierException e) {
              e.printStackTrace();
            }
          }
        }
      });
      otherThread.start();
      barrier.await();
      AllocateRequest allocateRequest = AllocateRequest.newInstance(0, 0.0f, null, null, null);
      client.allocate(allocateRequest);
      barrier.await();
      otherThread.join();
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    @Test public void testReconnectedNode() throws Exception { CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); setupQueueConfiguration(csConf); CapacityScheduler cs=new CapacityScheduler(); cs.setConf(new YarnConfiguration()); cs.setRMContext(resourceManager.getRMContext()); cs.init(csConf); cs.start(); cs.reinitialize(csConf,new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(csConf),new NMTokenSecretManagerInRM(csConf),new ClientToAMTokenSecretManagerInRM(),null)); RMNode n1=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1); RMNode n2=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),2); cs.handle(new NodeAddedSchedulerEvent(n1)); cs.handle(new NodeAddedSchedulerEvent(n2)); Assert.assertEquals(6 * GB,cs.getClusterResource().getMemory()); n1=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),1); cs.handle(new NodeRemovedSchedulerEvent(n1)); cs.handle(new NodeAddedSchedulerEvent(n1)); Assert.assertEquals(4 * GB,cs.getClusterResource().getMemory()); cs.stop(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testRefreshQueuesWithNewQueue() throws Exception { CapacityScheduler cs=new CapacityScheduler(); CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration(); setupQueueConfiguration(conf); cs.setConf(new YarnConfiguration()); cs.setRMContext(resourceManager.getRMContext()); cs.init(conf); cs.start(); cs.reinitialize(conf,new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(conf),new NMTokenSecretManagerInRM(conf),new ClientToAMTokenSecretManagerInRM(),null)); checkQueueCapacities(cs,A_CAPACITY,B_CAPACITY); String B4=B + ".b4"; float B4_CAPACITY=10; B3_CAPACITY-=B4_CAPACITY; try { conf.setCapacity(A,80f); conf.setCapacity(B,20f); conf.setQueues(B,new String[]{"b1","b2","b3","b4"}); conf.setCapacity(B1,B1_CAPACITY); conf.setCapacity(B2,B2_CAPACITY); conf.setCapacity(B3,B3_CAPACITY); conf.setCapacity(B4,B4_CAPACITY); cs.reinitialize(conf,mockContext); checkQueueCapacities(cs,80f,20f); CSQueue rootQueue=cs.getRootQueue(); CSQueue queueB=findQueue(rootQueue,B); CSQueue queueB4=findQueue(queueB,B4); assertEquals(queueB,queueB4.getParent()); } finally { B3_CAPACITY+=B4_CAPACITY; cs.stop(); } }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveAppSameParent() throws Exception { MockRM rm=setUpMove(); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler(); RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1"); ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId(); List appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("a1")); List appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); List appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); List appsInA2=scheduler.getAppsInQueue("a2"); assertTrue(appsInA2.isEmpty()); scheduler.moveApplication(app.getApplicationId(),"a2"); appsInA2=scheduler.getAppsInQueue("a2"); assertEquals(1,appsInA2.size()); queue=scheduler.getApplicationAttempt(appsInA2.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("a2")); appsInA1=scheduler.getAppsInQueue("a1"); assertTrue(appsInA1.isEmpty()); appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); rm.stop(); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Moves a running app (holding a live 3GB/1-vcore container) from a1 to a2
     * and compares CapacitySchedulerInfo snapshots taken before and after the
     * move: application counts, resources used, container counts, and per-user
     * usage all migrate from the source leaf to the target leaf while the
     * parent ("a") and root totals stay constant; the second application and
     * node-level usage are unaffected.
     */
    @Test public void testMoveAppQueueMetricsCheck() throws Exception { ResourceScheduler scheduler=resourceManager.getResourceScheduler(); String host_0="host_0"; NodeManager nm_0=registerNode(host_0,1234,2345,NetworkTopology.DEFAULT_RACK,Resources.createResource(5 * GB,1)); String host_1="host_1"; NodeManager nm_1=registerNode(host_1,1234,2345,NetworkTopology.DEFAULT_RACK,Resources.createResource(5 * GB,1)); Priority priority_0=org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(0); Priority priority_1=org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.create(1); Application application_0=new Application("user_0","a1",resourceManager); application_0.submit(); application_0.addNodeManager(host_0,1234,nm_0); application_0.addNodeManager(host_1,1234,nm_1); Resource capability_0_0=Resources.createResource(3 * GB,1); application_0.addResourceRequestSpec(priority_1,capability_0_0); Resource capability_0_1=Resources.createResource(2 * GB,1); application_0.addResourceRequestSpec(priority_0,capability_0_1); Task task_0_0=new Task(application_0,priority_1,new String[]{host_0,host_1}); application_0.addTask(task_0_0); Application application_1=new Application("user_1","b2",resourceManager); application_1.submit(); application_1.addNodeManager(host_0,1234,nm_0); application_1.addNodeManager(host_1,1234,nm_1); Resource capability_1_0=Resources.createResource(1 * GB,1); application_1.addResourceRequestSpec(priority_1,capability_1_0); Resource capability_1_1=Resources.createResource(2 * GB,1); application_1.addResourceRequestSpec(priority_0,capability_1_1); Task task_1_0=new Task(application_1,priority_1,new String[]{host_0,host_1}); application_1.addTask(task_1_0); application_0.schedule(); application_1.schedule(); nodeUpdate(nm_0); nodeUpdate(nm_1); CapacityScheduler cs=(CapacityScheduler)resourceManager.getResourceScheduler(); CSQueue origRootQ=cs.getRootQueue(); CapacitySchedulerInfo oldInfo=new CapacitySchedulerInfo(origRootQ); int 
origNumAppsA=getNumAppsInQueue("a",origRootQ.getChildQueues()); int origNumAppsRoot=origRootQ.getNumApplications(); scheduler.moveApplication(application_0.getApplicationId(),"a2"); CSQueue newRootQ=cs.getRootQueue(); int newNumAppsA=getNumAppsInQueue("a",newRootQ.getChildQueues()); int newNumAppsRoot=newRootQ.getNumApplications(); CapacitySchedulerInfo newInfo=new CapacitySchedulerInfo(newRootQ); CapacitySchedulerLeafQueueInfo origOldA1=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a1",oldInfo.getQueues()); CapacitySchedulerLeafQueueInfo origNewA1=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a1",newInfo.getQueues()); CapacitySchedulerLeafQueueInfo targetOldA2=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a2",oldInfo.getQueues()); CapacitySchedulerLeafQueueInfo targetNewA2=(CapacitySchedulerLeafQueueInfo)getQueueInfo("a2",newInfo.getQueues()); assertEquals(1,origOldA1.getNumApplications()); assertEquals(1,origNumAppsA); assertEquals(2,origNumAppsRoot); assertEquals(0,origNewA1.getNumApplications()); assertEquals(1,newNumAppsA); assertEquals(2,newNumAppsRoot); assertEquals(3 * GB,origOldA1.getResourcesUsed().getMemory()); assertEquals(1,origOldA1.getResourcesUsed().getvCores()); assertEquals(0,origNewA1.getResourcesUsed().getMemory()); assertEquals(0,origNewA1.getResourcesUsed().getvCores()); assertEquals(3 * GB,targetNewA2.getResourcesUsed().getMemory()); assertEquals(1,targetNewA2.getResourcesUsed().getvCores()); assertEquals(0,targetOldA2.getNumApplications()); assertEquals(0,targetOldA2.getResourcesUsed().getMemory()); assertEquals(0,targetOldA2.getResourcesUsed().getvCores()); assertEquals(1,targetNewA2.getNumApplications()); assertEquals(1,origOldA1.getNumContainers()); assertEquals(0,origNewA1.getNumContainers()); assertEquals(1,targetNewA2.getNumContainers()); assertEquals(0,targetOldA2.getNumContainers()); assertEquals(3 * GB,origOldA1.getUsers().getUsersList().get(0).getResourcesUsed().getMemory()); 
assertEquals(1,origOldA1.getUsers().getUsersList().get(0).getResourcesUsed().getvCores()); assertEquals(0,origNewA1.getUsers().getUsersList().size()); assertEquals(3 * GB,targetNewA2.getUsers().getUsersList().get(0).getResourcesUsed().getMemory()); assertEquals(1,targetNewA2.getUsers().getUsersList().get(0).getResourcesUsed().getvCores()); application_0.schedule(); checkApplicationResourceUsage(3 * GB,application_0); application_1.schedule(); checkApplicationResourceUsage(1 * GB,application_1); checkNodeResourceUsage(4 * GB,nm_0); checkNodeResourceUsage(0 * GB,nm_1); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveAllApps() throws Exception { MockRM rm=setUpMove(); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler(); RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1"); ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId(); List appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); List appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("a1")); List appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); List appsInB1=scheduler.getAppsInQueue("b1"); assertTrue(appsInB1.isEmpty()); List appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.isEmpty()); scheduler.moveAllApps("a1","b1"); Thread.sleep(1000); appsInB1=scheduler.getAppsInQueue("b1"); assertEquals(1,appsInB1.size()); queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("b1")); appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.contains(appAttemptId)); assertEquals(1,appsInB.size()); appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); appsInA1=scheduler.getAppsInQueue("a1"); assertTrue(appsInA1.isEmpty()); appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.isEmpty()); rm.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testKillAllAppsInQueue() throws Exception { MockRM rm=setUpMove(); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler(); RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1"); ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId(); List appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); List appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("a1")); List appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); scheduler.killAllAppsInQueue("a1"); rm.waitForState(app.getApplicationId(),RMAppState.KILLED); appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.isEmpty()); appsInA1=scheduler.getAppsInQueue("a1"); assertTrue(appsInA1.isEmpty()); appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.isEmpty()); rm.stop(); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testMaximumCapacitySetup(){ float delta=0.0000001f; CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration(); assertEquals(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE,conf.getMaximumCapacity(A),delta); conf.setMaximumCapacity(A,50.0f); assertEquals(50.0f,conf.getMaximumCapacity(A),delta); conf.setMaximumCapacity(A,-1); assertEquals(CapacitySchedulerConfiguration.MAXIMUM_CAPACITY_VALUE,conf.getMaximumCapacity(A),delta); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testNumClusterNodes() throws Exception { YarnConfiguration conf=new YarnConfiguration(); CapacityScheduler cs=new CapacityScheduler(); cs.setConf(conf); RMContextImpl rmContext=new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(conf),new NMTokenSecretManagerInRM(conf),new ClientToAMTokenSecretManagerInRM(),null); cs.setRMContext(rmContext); CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); setupQueueConfiguration(csConf); cs.init(csConf); cs.start(); assertEquals(0,cs.getNumClusterNodes()); RMNode n1=MockNodes.newNodeInfo(0,MockNodes.newResource(4 * GB),1); RMNode n2=MockNodes.newNodeInfo(0,MockNodes.newResource(2 * GB),2); cs.handle(new NodeAddedSchedulerEvent(n1)); cs.handle(new NodeAddedSchedulerEvent(n2)); assertEquals(2,cs.getNumClusterNodes()); cs.handle(new NodeRemovedSchedulerEvent(n1)); assertEquals(1,cs.getNumClusterNodes()); cs.handle(new NodeAddedSchedulerEvent(n1)); assertEquals(2,cs.getNumClusterNodes()); cs.handle(new NodeRemovedSchedulerEvent(n2)); cs.handle(new NodeRemovedSchedulerEvent(n1)); assertEquals(0,cs.getNumClusterNodes()); cs.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveAppBasic() throws Exception { MockRM rm=setUpMove(); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler(); RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1"); ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId(); List appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); String queue=scheduler.getApplicationAttempt(appsInA1.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("a1")); List appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); List appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); List appsInB1=scheduler.getAppsInQueue("b1"); assertTrue(appsInB1.isEmpty()); List appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.isEmpty()); scheduler.moveApplication(app.getApplicationId(),"b1"); appsInB1=scheduler.getAppsInQueue("b1"); assertEquals(1,appsInB1.size()); queue=scheduler.getApplicationAttempt(appsInB1.get(0)).getQueue().getQueueName(); Assert.assertTrue(queue.equals("b1")); appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.contains(appAttemptId)); assertEquals(1,appsInB.size()); appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); appsInA1=scheduler.getAppsInQueue("a1"); assertTrue(appsInA1.isEmpty()); appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.isEmpty()); rm.stop(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testAddAndRemoveAppFromCapacityScheduler() throws Exception { CapacitySchedulerConfiguration conf=new CapacitySchedulerConfiguration(); setupQueueConfiguration(conf); conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class); MockRM rm=new MockRM(conf); @SuppressWarnings("unchecked") AbstractYarnScheduler cs=(AbstractYarnScheduler)rm.getResourceScheduler(); SchedulerApplication app=TestSchedulerUtils.verifyAppAddedAndRemovedFromScheduler(cs.getSchedulerApplications(),cs,"a1"); Assert.assertEquals("a1",app.getQueue().getQueueName()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=30000) public void testRecoverRequestAfterPreemption() throws Exception { Configuration conf=new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER,CapacityScheduler.class,ResourceScheduler.class); MockRM rm1=new MockRM(conf); rm1.start(); MockNM nm1=rm1.registerNode("127.0.0.1:1234",8000); RMApp app1=rm1.submitApp(1024); MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1); CapacityScheduler cs=(CapacityScheduler)rm1.getResourceScheduler(); am1.allocate("127.0.0.1",1024,1,new ArrayList()); ContainerId containerId1=ContainerId.newInstance(am1.getApplicationAttemptId(),2); rm1.waitForState(nm1,containerId1,RMContainerState.ALLOCATED); RMContainer rmContainer=cs.getRMContainer(containerId1); List requests=rmContainer.getResourceRequests(); FiCaSchedulerApp app=cs.getApplicationAttempt(am1.getApplicationAttemptId()); FiCaSchedulerNode node=cs.getNode(rmContainer.getAllocatedNode()); for ( ResourceRequest request : requests) { if (request.getResourceName().equals(node.getRackName()) || request.getResourceName().equals(ResourceRequest.ANY)) { continue; } Assert.assertNull(app.getResourceRequest(request.getPriority(),request.getResourceName())); } cs.killContainer(rmContainer); Assert.assertEquals(3,requests.size()); for ( ResourceRequest request : requests) { Assert.assertEquals(1,app.getResourceRequest(request.getPriority(),request.getResourceName()).getNumContainers()); } ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),3); rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED); List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers(); Assert.assertTrue(containers.size() == 1); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveAllAppsInvalidDestination() throws Exception { MockRM rm=setUpMove(); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler(); RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1"); ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId(); List appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); List appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); List appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); List appsInB1=scheduler.getAppsInQueue("b1"); assertTrue(appsInB1.isEmpty()); List appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.isEmpty()); try { scheduler.moveAllApps("a1","DOES_NOT_EXIST"); Assert.fail(); } catch ( YarnException e) { } appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); appsInB1=scheduler.getAppsInQueue("b1"); assertTrue(appsInB1.isEmpty()); appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.isEmpty()); rm.stop(); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testCapacitySchedulerInfo() throws Exception { QueueInfo queueInfo=resourceManager.getResourceScheduler().getQueueInfo("a",true,true); Assert.assertEquals(queueInfo.getQueueName(),"a"); Assert.assertEquals(queueInfo.getChildQueues().size(),2); List userACLInfo=resourceManager.getResourceScheduler().getQueueUserAclInfo(); Assert.assertNotNull(userACLInfo); for ( QueueUserACLInfo queueUserACLInfo : userACLInfo) { Assert.assertEquals(getQueueCount(userACLInfo,queueUserACLInfo.getQueueName()),1); } }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testMoveAllAppsInvalidSource() throws Exception { MockRM rm=setUpMove(); AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler(); RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1"); ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId(); List appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); List appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); List appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); List appsInB1=scheduler.getAppsInQueue("b1"); assertTrue(appsInB1.isEmpty()); List appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.isEmpty()); try { scheduler.moveAllApps("DOES_NOT_EXIST","b1"); Assert.fail(); } catch ( YarnException e) { } appsInA1=scheduler.getAppsInQueue("a1"); assertEquals(1,appsInA1.size()); appsInA=scheduler.getAppsInQueue("a"); assertTrue(appsInA.contains(appAttemptId)); assertEquals(1,appsInA.size()); appsInRoot=scheduler.getAppsInQueue("root"); assertTrue(appsInRoot.contains(appAttemptId)); assertEquals(1,appsInRoot.size()); appsInB1=scheduler.getAppsInQueue("b1"); assertTrue(appsInB1.isEmpty()); appsInB=scheduler.getAppsInQueue("b"); assertTrue(appsInB.isEmpty()); rm.stop(); }

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Queue membership queries: a leaf queue returns only its own apps, a
     * parent queue aggregates its leaves, root sees everything, and an
     * unknown queue name yields null.
     */
    @Test public void testGetAppsInQueue() throws Exception {
      // Three apps: two under parent "a" (leaves a1, a2) and one under "b" (b2).
      Application appA1 = new Application("user_0", "a1", resourceManager);
      appA1.submit();
      Application appA2 = new Application("user_0", "a2", resourceManager);
      appA2.submit();
      Application appB2 = new Application("user_0", "b2", resourceManager);
      appB2.submit();

      ResourceScheduler scheduler = resourceManager.getResourceScheduler();

      // Leaf queue a1 holds exactly the one app submitted to it.
      List leafApps = scheduler.getAppsInQueue("a1");
      assertEquals(1, leafApps.size());

      // Parent "a" aggregates the attempts of both of its leaves.
      List parentApps = scheduler.getAppsInQueue("a");
      assertTrue(parentApps.contains(appA1.getApplicationAttemptId()));
      assertTrue(parentApps.contains(appA2.getApplicationAttemptId()));
      assertEquals(2, parentApps.size());

      // Root sees every submitted app.
      List rootApps = scheduler.getAppsInQueue("root");
      assertTrue(rootApps.contains(appA1.getApplicationAttemptId()));
      assertTrue(rootApps.contains(appA2.getApplicationAttemptId()));
      assertTrue(rootApps.contains(appB2.getApplicationAttemptId()));
      assertEquals(3, rootApps.size());

      // Unknown queue name: null, not an empty list.
      Assert.assertNull(scheduler.getAppsInQueue("nonexistentqueue"));
    }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * killAllAppsInQueue() with a non-existent queue must throw YarnException
     * and must not disturb the app membership of any real queue.
     */
    @Test public void testKillAllAppsInvalidSource() throws Exception {
      MockRM rm=setUpMove();
      AbstractYarnScheduler scheduler=(AbstractYarnScheduler)rm.getResourceScheduler();
      // Submit a single app into leaf queue a1.
      RMApp app=rm.submitApp(GB,"test-move-1","user_0",null,"a1");
      ApplicationAttemptId appAttemptId=rm.getApplicationReport(app.getApplicationId()).getCurrentApplicationAttemptId();
      // Baseline: the attempt is visible in a1, its parent a, and root.
      List appsInA1=scheduler.getAppsInQueue("a1");
      assertEquals(1,appsInA1.size());
      List appsInA=scheduler.getAppsInQueue("a");
      assertTrue(appsInA.contains(appAttemptId));
      assertEquals(1,appsInA.size());
      List appsInRoot=scheduler.getAppsInQueue("root");
      assertTrue(appsInRoot.contains(appAttemptId));
      assertEquals(1,appsInRoot.size());
      // Killing apps in a queue that does not exist must fail.
      try {
        scheduler.killAllAppsInQueue("DOES_NOT_EXIST");
        Assert.fail();
      } catch ( YarnException e) {
        // Expected: invalid queue name.
      }
      // After the failed kill, membership is unchanged everywhere.
      appsInA1=scheduler.getAppsInQueue("a1");
      assertEquals(1,appsInA1.size());
      appsInA=scheduler.getAppsInQueue("a");
      assertTrue(appsInA.contains(appAttemptId));
      assertEquals(1,appsInA.size());
      appsInRoot=scheduler.getAppsInQueue("root");
      assertTrue(appsInRoot.contains(appAttemptId));
      assertEquals(1,appsInRoot.size());
      rm.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestContainerAllocation

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * With the token service keyed by IP, a node registered under an
     * unresolvable hostname cannot get a container token, so the AM's pull
     * returns nothing; switching back to host-based token service lets the
     * already-allocated container be handed out.
     */
    @Test public void testNormalContainerAllocationWhenDNSUnavailable() throws Exception {
      MockRM rm1=new MockRM(conf);
      rm1.start();
      try {
        MockNM nm1=rm1.registerNode("unknownhost:1234",8000);
        RMApp app1=rm1.submitApp(200);
        MockAM am1=MockRM.launchAndRegisterAM(app1,rm1,nm1);
        // Request one 1GB container and wait until the scheduler allocates it.
        am1.allocate("127.0.0.1",1024,1,new ArrayList());
        ContainerId containerId2=ContainerId.newInstance(am1.getApplicationAttemptId(),2);
        rm1.waitForState(nm1,containerId2,RMContainerState.ALLOCATED);
        // IP-keyed token service + unresolvable host => token creation fails,
        // so the pull must return no containers.
        SecurityUtilTestHelper.setTokenServiceUseIp(true);
        List containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
        Assert.assertEquals(0,containers.size());
        // Host-based token service restored => container is handed out.
        SecurityUtilTestHelper.setTokenServiceUseIp(false);
        containers=am1.allocate(new ArrayList(),new ArrayList()).getAllocatedContainers();
        Assert.assertEquals(1,containers.size());
      } finally {
        // Fix: the original never stopped the MockRM (resource leak); every
        // sibling test in this class stops its RM.
        rm1.stop();
      }
    }

    IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * While the token service is keyed by IP, the AM container token cannot
     * be created for a node with an unresolvable hostname, so the app attempt
     * must remain SCHEDULED across several heartbeats; once host-based token
     * service is restored, the AM container is allocated and the AM launches.
     */
    @Test(timeout=20000) public void testAMContainerAllocationWhenDNSUnavailable() throws Exception {
      MockRM rm1=new MockRM(conf){
        @Override protected RMSecretManagerService createRMSecretManagerService(){
          return new TestRMSecretManagerService(conf,rmContext);
        }
      };
      rm1.start();
      MockNM nm1=rm1.registerNode("unknownhost:1234",8000);
      SecurityUtilTestHelper.setTokenServiceUseIp(true);
      RMApp app1=rm1.submitApp(200);
      RMAppAttempt attempt=app1.getCurrentAppAttempt();
      nm1.nodeHeartbeat(true);
      // Fix: the original loop condition read an undeclared, never-advanced
      // 'numRetries', so the loop could only end via the 20s test timeout.
      // Bound it to six heartbeat/sleep iterations, asserting the attempt
      // stays SCHEDULED each time.
      int numRetries=0;
      while (numRetries++ <= 5) {
        nm1.nodeHeartbeat(true);
        Thread.sleep(1000);
        Assert.assertEquals(RMAppAttemptState.SCHEDULED,attempt.getAppAttemptState());
        System.out.println("Waiting for am container to be allocated.");
      }
      // Restore host-based token service; allocation should now proceed.
      SecurityUtilTestHelper.setTokenServiceUseIp(false);
      rm1.waitForState(attempt.getAppAttemptId(),RMAppAttemptState.ALLOCATED);
      MockRM.launchAndRegisterAM(app1,rm1,nm1);
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A container token is generated lazily: the allocated container carries
     * no token until the AM actually pulls it via allocate().
     */
    @Test public void testContainerTokenGeneratedOnPullRequest() throws Exception {
      MockRM rm1 = new MockRM(conf);
      rm1.start();
      MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 8000);
      RMApp app1 = rm1.submitApp(200);
      MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);

      // Ask for one 1GB container and wait for the scheduler to allocate it.
      am1.allocate("127.0.0.1", 1024, 1, new ArrayList());
      ContainerId allocatedId = ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
      rm1.waitForState(nm1, allocatedId, RMContainerState.ALLOCATED);

      // Before the AM pulls the container, no token has been attached.
      RMContainer rmContainer = rm1.getResourceScheduler().getRMContainer(allocatedId);
      Assert.assertEquals(allocatedId, rmContainer.getContainerId());
      Assert.assertNull(rmContainer.getContainer().getContainerToken());

      // Pulling the container through allocate() must attach a token.
      List pulled = am1.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers();
      Assert.assertEquals(allocatedId, pulled.get(0).getId());
      Assert.assertNotNull(pulled.get(0).getContainerToken());
      rm1.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A 3GB request must not be satisfied (or stuck reserved) on a 2GB node:
     * heartbeating the 2GB NM yields no allocation, while heartbeating the
     * 3GB NM satisfies the request.
     */
    @Test(timeout=3000000) public void testExcessReservationThanNodeManagerCapacity() throws Exception {
      MockRM rm=new MockRM(conf);
      rm.start();
      // Two NMs: 2GB and 3GB.
      MockNM nm1=rm.registerNode("127.0.0.1:1234",2 * GB,4);
      MockNM nm2=rm.registerNode("127.0.0.1:2234",3 * GB,4);
      nm1.nodeHeartbeat(true);
      nm2.nodeHeartbeat(true);
      // Wait for both nodes to register with the RM.
      // Fix: the original pre-initialized 'size' with a redundant call that
      // the loop condition immediately overwrote.
      int waitCount=20;
      int size;
      while ((size=rm.getRMContext().getRMNodes().size()) != 2 && waitCount-- > 0) {
        LOG.info("Waiting for node managers to register : " + size);
        Thread.sleep(100);
      }
      Assert.assertEquals(2,rm.getRMContext().getRMNodes().size());
      RMApp app1=rm.submitApp(128);
      nm1.nodeHeartbeat(true);
      RMAppAttempt attempt1=app1.getCurrentAppAttempt();
      MockAM am1=rm.sendAMLaunched(attempt1.getAppAttemptId());
      am1.registerAppAttempt();
      LOG.info("sending container requests ");
      // One 3GB request — fits only on nm2.
      am1.addRequests(new String[]{"*"},3 * GB,1,1);
      AllocateResponse alloc1Response=am1.schedule();
      // Heartbeat the 2GB node: the 3GB ask cannot be allocated there.
      nm1.nodeHeartbeat(true);
      int waitCounter=20;
      LOG.info("heartbeating nm1");
      while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
        LOG.info("Waiting for containers to be created for app 1...");
        Thread.sleep(500);
        alloc1Response=am1.schedule();
      }
      LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
      // Fix: assertEquals instead of assertTrue(size() == n) so a failure
      // reports the actual count.
      Assert.assertEquals(0,alloc1Response.getAllocatedContainers().size());
      // Heartbeat the 3GB node: the request fits and must be satisfied.
      LOG.info("heartbeating nm2");
      waitCounter=20;
      nm2.nodeHeartbeat(true);
      while (alloc1Response.getAllocatedContainers().size() < 1 && waitCounter-- > 0) {
        LOG.info("Waiting for containers to be created for app 1...");
        Thread.sleep(500);
        alloc1Response=am1.schedule();
      }
      LOG.info("received container : " + alloc1Response.getAllocatedContainers().size());
      Assert.assertEquals(1,alloc1Response.getAllocatedContainers().size());
      rm.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestLeafQueue

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * After a single 1GB allocation, the queue's availableMB metric must be
     * the queue's capacity share of the node's memory minus the 1GB used.
     */
    @Test public void testSingleQueueOneUserMetrics() throws Exception {
      // NOTE(review): the variable is named 'a' but the queue fetched is B —
      // appears deliberate (any stubbed leaf queue works here), but confirm.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(B));
      final String user_0="user_0";
      // Two attempts from the same user, both submitted to the queue.
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
      a.submitApplicationAttempt(app_0,user_0);
      final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
      FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext);
      a.submitApplicationAttempt(app_1,user_0);
      // A single 8GB node forms the whole cluster.
      String host_0="127.0.0.1";
      FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
      final int numNodes=1;
      Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      // app_0 asks for 3 x 1GB; one assignment round allocates one container.
      Priority priority=TestUtils.createMockPriority(1);
      app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory)));
      a.assignContainers(clusterResource,node_0);
      // available = capacity-share of node memory - the 1GB just allocated.
      assertEquals((int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1 * GB),a.getMetrics().getAvailableMB());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises one leaf queue with a single user running two apps. Repeated
     * assignContainers() calls grow app_0 up to the user limit (allocation
     * stalls at 2GB), raising the user-limit factor lets allocation resume
     * (app_0 to 3GB, then app_1 gets 1GB), and lowering max-capacity to 0.5
     * blocks further assignment without revoking existing containers.
     * Finally, killing each app's live containers drains queue usage back to
     * zero and restores the availableMB metric to the queue's full capacity
     * share of the node. Used/reserved/allocated/available MB metrics are
     * asserted after every step.
     * NOTE(review): the assertions depend on the exact order of
     * assignContainers()/setter calls — do not reorder.
     */
    @Test public void testSingleQueueWithOneUser() throws Exception { LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A)); a.setMaxCapacity(1.0f); final String user_0="user_0"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0); FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext); a.submitApplicationAttempt(app_0,user_0); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0); FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext); a.submitApplicationAttempt(app_1,user_0); String host_0="127.0.0.1"; FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB); final int numNodes=1; Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); Priority priority=TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory))); app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory))); a.assignContainers(clusterResource,node_0); assertEquals(1 * GB,a.getUsedResources().getMemory()); assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(1 * GB,a.getMetrics().getAllocatedMB()); assertEquals(0 * GB,a.getMetrics().getAvailableMB()); a.assignContainers(clusterResource,node_0); assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(2 * GB,a.getMetrics().getAllocatedMB()); 
a.assignContainers(clusterResource,node_0); assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(2 * GB,a.getMetrics().getAllocatedMB()); a.setUserLimitFactor(10); a.assignContainers(clusterResource,node_0); assertEquals(3 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(3 * GB,a.getMetrics().getAllocatedMB()); a.assignContainers(clusterResource,node_0); assertEquals(4 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(4 * GB,a.getMetrics().getAllocatedMB()); a.setMaxCapacity(0.5f); a.assignContainers(clusterResource,node_0); assertEquals(4 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(4 * GB,a.getMetrics().getAllocatedMB()); for ( RMContainer rmContainer : app_0.getLiveContainers()) { a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null); } assertEquals(1 * GB,a.getUsedResources().getMemory()); assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(1 * GB,a.getMetrics().getAllocatedMB()); for ( RMContainer rmContainer : 
app_1.getLiveContainers()) { a.completedContainer(clusterResource,app_1,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null); } assertEquals(0 * GB,a.getUsedResources().getMemory()); assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(0 * GB,a.getMetrics().getAllocatedMB()); assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemory()),a.getMetrics().getAvailableMB()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Queue E initially admits only two concurrently active applications;
     * a third submission is held pending, and growing the cluster resource
     * raises the limit so the pending app is activated.
     */
    @Test(timeout=30000) public void testActivateApplicationByUpdatingClusterResource() throws Exception {
      LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
      final String submitter = "user_e";

      // Three attempts from the same user submitted back to back.
      final ApplicationAttemptId attempt0 = TestUtils.getMockApplicationAttemptId(0, 0);
      FiCaSchedulerApp schedApp0 =
          new FiCaSchedulerApp(attempt0, submitter, e, mock(ActiveUsersManager.class), rmContext);
      e.submitApplicationAttempt(schedApp0, submitter);

      final ApplicationAttemptId attempt1 = TestUtils.getMockApplicationAttemptId(1, 0);
      FiCaSchedulerApp schedApp1 =
          new FiCaSchedulerApp(attempt1, submitter, e, mock(ActiveUsersManager.class), rmContext);
      e.submitApplicationAttempt(schedApp1, submitter);

      final ApplicationAttemptId attempt2 = TestUtils.getMockApplicationAttemptId(2, 0);
      FiCaSchedulerApp schedApp2 =
          new FiCaSchedulerApp(attempt2, submitter, e, mock(ActiveUsersManager.class), rmContext);
      e.submitApplicationAttempt(schedApp2, submitter);

      // The third submission exceeds the active-application limit.
      assertEquals(2, e.activeApplications.size());
      assertEquals(1, e.pendingApplications.size());

      // A larger cluster raises the limit and activates the pending app.
      e.updateClusterResource(Resources.createResource(200 * 16 * GB, 100 * 32));
      assertEquals(3, e.activeApplications.size());
      assertEquals(0, e.pendingApplications.size());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * An existing LeafQueue instance must observe a changed node-locality
     * delay after the queue hierarchy is rebuilt and reinitialized.
     */
    @Test(timeout=30000) public void testNodeLocalityAfterQueueRefresh() throws Exception {
      LeafQueue e = stubLeafQueue((LeafQueue) queues.get(E));
      // Delay as originally configured for the queue.
      assertEquals(40, e.getNodeLocalityDelay());

      // Raise the delay and rebuild the queue tree from the updated config.
      csConf.setInt(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY, 60);
      Map refreshedQueues = new HashMap();
      CSQueue refreshedRoot = CapacityScheduler.parseQueue(csContext, csConf, null,
          CapacitySchedulerConfiguration.ROOT, refreshedQueues, queues, TestUtils.spyHook);
      queues = refreshedQueues;
      root.reinitialize(refreshedRoot, cs.getClusterResource());

      // The pre-existing LeafQueue reference reflects the refreshed value.
      assertEquals(60, e.getNodeLocalityDelay());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * The max-AM-resource percentage (and the derived maximum number of
     * active applications) must track both queue reinitialization from an
     * updated config and subsequent cluster-resource growth.
     */
    @Test public void testMaxAMResourcePerQueuePercentAfterQueueRefresh() throws Exception {
      CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
      Resource clusterResource = Resources.createResource(100 * 16 * GB, 100 * 32);
      CapacitySchedulerContext csContext = mockCSContext(csConf, clusterResource);

      // 10% of the cluster reserved for application masters.
      csConf.setFloat(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.1f);
      ParentQueue root = new ParentQueue(csContext, CapacitySchedulerConfiguration.ROOT, null, null);
      csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + A, 80);
      LeafQueue a = new LeafQueue(csContext, A, root, null);
      assertEquals(0.1f, a.getMaxAMResourcePerQueuePercent(), 1e-3f);
      assertEquals(160, a.getMaximumActiveApplications());

      // Reinitialize from a config that doubles the AM percentage.
      csConf.setFloat(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT, 0.2f);
      LeafQueue refreshed = new LeafQueue(csContext, A, root, null);
      a.reinitialize(refreshed, clusterResource);
      assertEquals(0.2f, a.getMaxAMResourcePerQueuePercent(), 1e-3f);
      assertEquals(320, a.getMaximumActiveApplications());

      // Growing the cluster raises the active-application ceiling further.
      Resource grownResource = Resources.createResource(100 * 20 * GB, 100 * 32);
      a.updateClusterResource(grownResource);
      assertEquals(400, a.getMaximumActiveApplications());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies delay scheduling's locality constraints combined with app
     * blacklisting on a 4-node, 2-rack cluster. Requests pinned to specific
     * hosts/racks must never be allocated on blacklisted nodes or racks:
     * each assignContainers() round is followed by a Mockito
     * verify(app_0, never()).allocate(...) check proving no allocation
     * reached the app, until the blacklist is lifted and a legal node-local
     * assignment on node_1_0 finally succeeds (verified with a plain
     * verify(app_0).allocate(...)).
     * NOTE(review): every assertion here depends on the precise sequence of
     * updateResourceRequests()/updateBlacklist()/assignContainers() calls —
     * do not reorder or merge the steps.
     */
    @Test public void testLocalityConstraints() throws Exception { LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A)); String user_0="user_0"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0); FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext)); a.submitApplicationAttempt(app_0,user_0); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0); FiCaSchedulerApp app_1=spy(new FiCaSchedulerApp(appAttemptId_1,user_0,a,mock(ActiveUsersManager.class),rmContext)); a.submitApplicationAttempt(app_1,user_0); String host_0_0="127.0.0.1"; String rack_0="rack_0"; String host_0_1="127.0.0.2"; FiCaSchedulerNode node_0_1=TestUtils.getMockNode(host_0_1,rack_0,0,8 * GB); String host_1_0="127.0.0.3"; String rack_1="rack_1"; FiCaSchedulerNode node_1_0=TestUtils.getMockNode(host_1_0,rack_1,0,8 * GB); String host_1_1="127.0.0.4"; FiCaSchedulerNode node_1_1=TestUtils.getMockNode(host_1_1,rack_1,0,8 * GB); final int numNodes=4; Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 1); when(csContext.getNumClusterNodes()).thenReturn(numNodes); Priority priority=TestUtils.createMockPriority(1); List app_0_requests_0=new ArrayList(); app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0,1 * GB,1,true,priority,recordFactory)); app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0,1 * GB,1,true,priority,recordFactory)); app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,false,priority,recordFactory)); app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,false,priority,recordFactory)); app_0.updateResourceRequests(app_0_requests_0); app_0.updateBlacklist(Collections.singletonList(host_0_0),null); app_0_requests_0.clear(); a.assignContainers(clusterResource,node_0_1); 
verify(app_0,never()).allocate(any(NodeType.class),eq(node_0_1),any(Priority.class),any(ResourceRequest.class),any(Container.class)); assertEquals(0,app_0.getSchedulingOpportunities(priority)); a.assignContainers(clusterResource,node_1_1); verify(app_0,never()).allocate(any(NodeType.class),eq(node_0_1),any(Priority.class),any(ResourceRequest.class),any(Container.class)); assertEquals(0,app_0.getSchedulingOpportunities(priority)); app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory)); app_0.updateResourceRequests(app_0_requests_0); app_0.updateBlacklist(Collections.singletonList(host_1_1),null); app_0_requests_0.clear(); a.assignContainers(clusterResource,node_1_1); verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class)); assertEquals(0,app_0.getSchedulingOpportunities(priority)); app_0.updateResourceRequests(app_0_requests_0); app_0.updateBlacklist(Collections.singletonList(rack_1),Collections.singletonList(host_1_1)); app_0_requests_0.clear(); a.assignContainers(clusterResource,node_1_1); verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class)); assertEquals(0,app_0.getSchedulingOpportunities(priority)); app_0.updateResourceRequests(app_0_requests_0); app_0.updateBlacklist(null,Collections.singletonList(rack_1)); app_0_requests_0.clear(); a.assignContainers(clusterResource,node_1_1); verify(app_0,never()).allocate(eq(NodeType.RACK_LOCAL),eq(node_1_1),any(Priority.class),any(ResourceRequest.class),any(Container.class)); assertEquals(0,app_0.getSchedulingOpportunities(priority)); assertEquals(1,app_0.getTotalRequiredResources(priority)); app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,false,priority,recordFactory)); app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,false,priority,recordFactory)); 
app_0.updateResourceRequests(app_0_requests_0); app_0_requests_0.clear(); a.assignContainers(clusterResource,node_1_0); verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class)); assertEquals(0,app_0.getSchedulingOpportunities(priority)); assertEquals(0,app_0.getTotalRequiredResources(priority)); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Reservation "stealing" scenario on two 4GB nodes: app_0 takes 2GB on
     * node_0; app_1's 4GB request cannot fit beside it and is reserved on
     * node_0. With the node-locality delay disabled (doReturn(-1)), app_1 is
     * then allocated 4GB on node_1 while its node_0 reservation persists.
     * Once app_0's container is killed, the freed space on node_0 is
     * allocated to app_1 and the reservation is released — queue usage,
     * per-app consumption/reservation, per-node usage, and queue metrics are
     * asserted at every step.
     */
    @Test public void testStolenReservedContainer() throws Exception { LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A)); a.setMaxCapacity(1.0f); final String user_0="user_0"; final String user_1="user_1"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0); FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext); a.submitApplicationAttempt(app_0,user_0); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0); FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext); a.submitApplicationAttempt(app_1,user_1); String host_0="127.0.0.1"; FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB); String host_1="127.0.0.2"; FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,4 * GB); final int numNodes=3; Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); Priority priority=TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory))); ArrayList appRequests_1=new ArrayList(4); appRequests_1.add(TestUtils.createResourceRequest(host_0,4 * GB,1,true,priority,recordFactory)); appRequests_1.add(TestUtils.createResourceRequest(DEFAULT_RACK,4 * GB,1,true,priority,recordFactory)); appRequests_1.add(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,2,true,priority,recordFactory)); app_1.updateResourceRequests(appRequests_1); a.assignContainers(clusterResource,node_0); assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(2 * GB,a.getMetrics().getAllocatedMB()); 
assertEquals(0 * GB,a.getMetrics().getAvailableMB()); a.assignContainers(clusterResource,node_0); assertEquals(6 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(4 * GB,app_1.getCurrentReservation().getMemory()); assertEquals(2 * GB,node_0.getUsedResource().getMemory()); assertEquals(4 * GB,a.getMetrics().getReservedMB()); assertEquals(2 * GB,a.getMetrics().getAllocatedMB()); doReturn(-1).when(a).getNodeLocalityDelay(); a.assignContainers(clusterResource,node_1); assertEquals(10 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(4 * GB,app_1.getCurrentReservation().getMemory()); assertEquals(4 * GB,node_1.getUsedResource().getMemory()); assertEquals(4 * GB,a.getMetrics().getReservedMB()); assertEquals(6 * GB,a.getMetrics().getAllocatedMB()); RMContainer rmContainer=app_0.getLiveContainers().iterator().next(); a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null); a.assignContainers(clusterResource,node_0); assertEquals(8 * GB,a.getUsedResources().getMemory()); assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(8 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentReservation().getMemory()); assertEquals(4 * GB,node_0.getUsedResource().getMemory()); assertEquals(0 * GB,a.getMetrics().getReservedMB()); assertEquals(8 * GB,a.getMetrics().getAllocatedMB()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * One leaf queue shared by three users (user_0 runs two apps, user_1 and
     * user_2 one each) on a single 8GB node. Walks through: user_0 consuming
     * 2GB, the userLimit=25 setting stalling further user_0 allocation, a
     * raised user-limit factor letting user_1 (3GB) and user_0 (to 3GB)
     * proceed, max-capacity 0.5 freezing assignment without revoking
     * containers, and finally userLimitFactor=1 letting user_2 reach 2GB.
     * Killing each app's live containers then steps queue usage down
     * 8GB -> 5GB -> 2GB -> 0.
     * NOTE(review): assertions depend on the exact sequence of setter and
     * assignContainers() calls — do not reorder.
     */
    @Test public void testSingleQueueWithMultipleUsers() throws Exception { LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A)); a.setMaxCapacity(1.0f); final String user_0="user_0"; final String user_1="user_1"; final String user_2="user_2"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0); FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_0,user_0); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0); FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_1,user_0); final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0); FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_2,user_1); final ApplicationAttemptId appAttemptId_3=TestUtils.getMockApplicationAttemptId(3,0); FiCaSchedulerApp app_3=new FiCaSchedulerApp(appAttemptId_3,user_2,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_3,user_2); String host_0="127.0.0.1"; FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB); final int numNodes=1; Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); Priority priority=TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,10,true,priority,recordFactory))); app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,10,true,priority,recordFactory))); a.assignContainers(clusterResource,node_0); assertEquals(1 * GB,a.getUsedResources().getMemory()); assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * 
GB,app_1.getCurrentConsumption().getMemory()); a.assignContainers(clusterResource,node_0); assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); a.setUserLimit(25); a.assignContainers(clusterResource,node_0); assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,3 * GB,1,true,priority,recordFactory))); app_3.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory))); a.setUserLimitFactor(10); a.assignContainers(clusterResource,node_0); assertEquals(5 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory()); a.assignContainers(clusterResource,node_0); assertEquals(6 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory()); a.setMaxCapacity(0.5f); a.assignContainers(clusterResource,node_0); assertEquals(6 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory()); a.setMaxCapacity(1.0f); a.setUserLimitFactor(1); 
a.assignContainers(clusterResource,node_0); assertEquals(7 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(1 * GB,app_3.getCurrentConsumption().getMemory()); a.assignContainers(clusterResource,node_0); assertEquals(8 * GB,a.getUsedResources().getMemory()); assertEquals(3 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory()); for ( RMContainer rmContainer : app_0.getLiveContainers()) { a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null); } assertEquals(5 * GB,a.getUsedResources().getMemory()); assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(3 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory()); for ( RMContainer rmContainer : app_2.getLiveContainers()) { a.completedContainer(clusterResource,app_2,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null); } assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(2 * GB,app_3.getCurrentConsumption().getMemory()); for ( RMContainer rmContainer : app_3.getLiveContainers()) { 
a.completedContainer(clusterResource,app_3,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null); } assertEquals(0 * GB,a.getUsedResources().getMemory()); assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_3.getCurrentConsumption().getMemory()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Two users share one queue with userLimit=50 and userLimitFactor=2 on
     * two 8GB nodes. Only user_0's apps have outstanding demand, so exactly
     * one active user is reported; successive assignContainers() rounds then
     * give app_0 its 2GB container and app_1 its two 1GB containers (one per
     * node), growing queue usage 2GB -> 3GB -> 4GB.
     */
    @Test public void testUserLimits() throws Exception { LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A)); a.setMaxCapacity(1.0f); final String user_0="user_0"; final String user_1="user_1"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0); FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_0,user_0); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0); FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_1,user_0); final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0); FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext); a.submitApplicationAttempt(app_2,user_1); String host_0="127.0.0.1"; FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB); String host_1="127.0.0.2"; FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,8 * GB); final int numNodes=2; Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); Priority priority=TestUtils.createMockPriority(1); app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory))); app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory))); a.setUserLimit(50); a.setUserLimitFactor(2); assertEquals("There should only be 1 active user!",1,a.getActiveUsersManager().getNumActiveUsers()); a.assignContainers(clusterResource,node_0); assertEquals(2 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory()); 
a.assignContainers(clusterResource,node_0); assertEquals(3 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory()); a.assignContainers(clusterResource,node_1); assertEquals(4 * GB,a.getUsedResources().getMemory()); assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory()); assertEquals(2 * GB,app_1.getCurrentConsumption().getMemory()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testInitializeQueue() throws Exception { final float epsilon=1e-5f; LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A)); assertEquals(0.085,a.getCapacity(),epsilon); assertEquals(0.085,a.getAbsoluteCapacity(),epsilon); assertEquals(0.2,a.getMaximumCapacity(),epsilon); assertEquals(0.2,a.getAbsoluteMaximumCapacity(),epsilon); LeafQueue b=stubLeafQueue((LeafQueue)queues.get(B)); assertEquals(0.80,b.getCapacity(),epsilon); assertEquals(0.80,b.getAbsoluteCapacity(),epsilon); assertEquals(0.99,b.getMaximumCapacity(),epsilon); assertEquals(0.99,b.getAbsoluteMaximumCapacity(),epsilon); ParentQueue c=(ParentQueue)queues.get(C); assertEquals(0.015,c.getCapacity(),epsilon); assertEquals(0.015,c.getAbsoluteCapacity(),epsilon); assertEquals(0.1,c.getMaximumCapacity(),epsilon); assertEquals(0.1,c.getAbsoluteMaximumCapacity(),epsilon); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies headroom computation when the queue's max capacity caps usage:
     * while the queue is at or above its cap, active apps see zero headroom;
     * once one user's demand is withdrawn, the remaining user's app regains
     * headroom.
     */
    @Test public void testHeadroomWithMaxCap() throws Exception {
      // Mock the queue, initially uncapped.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
      a.setMaxCapacity(1.0f);
      // Users
      final String user_0="user_0";
      final String user_1="user_1";
      // Attempts 0 and 1 belong to user_0; attempt 2 to user_1.
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,a.getActiveUsersManager(),rmContext);
      a.submitApplicationAttempt(app_0,user_0);
      final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
      FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,a.getActiveUsersManager(),rmContext);
      a.submitApplicationAttempt(app_1,user_0);
      final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0);
      FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_1,a,a.getActiveUsersManager(),rmContext);
      a.submitApplicationAttempt(app_2,user_1);
      // Two mock 8GB nodes.
      String host_0="127.0.0.1";
      FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,8 * GB);
      String host_1="127.0.0.2";
      FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,8 * GB);
      final int numNodes=2;
      Resource clusterResource=Resources.createResource(numNodes * (8 * GB),1);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      // Demand: app_0 wants 1x2GB, app_1 wants 2x1GB; app_2 asks nothing yet.
      Priority priority=TestUtils.createMockPriority(1);
      app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority,recordFactory)));
      app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
      // User limits.
      a.setUserLimit(50);
      a.setUserLimitFactor(2);
      // Only user_0 has demand so far.
      assertEquals("There should only be 1 active user!",1,a.getActiveUsersManager().getNumActiveUsers());
      // 1st assignment: app_0 gets 2GB; headroom for both apps is zero.
      a.assignContainers(clusterResource,node_0);
      assertEquals(2 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_0.getHeadroom().getMemory());
      assertEquals(0 * GB,app_1.getHeadroom().getMemory());
      // 2nd assignment: app_1 gets 1GB; still no headroom.
      a.assignContainers(clusterResource,node_0);
      assertEquals(3 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_0.getHeadroom().getMemory());
      assertEquals(0 * GB,app_1.getHeadroom().getMemory());
      // Cap the queue at 10%; user_1's app now posts a request, making two active users.
      a.setMaxCapacity(.1f);
      app_2.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory)));
      assertEquals(2,a.getActiveUsersManager().getNumActiveUsers());
      // Over the cap: nothing is allocated on node_1 and headroom stays zero.
      a.assignContainers(clusterResource,node_1);
      assertEquals(3 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(1 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_2.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_0.getHeadroom().getMemory());
      assertEquals(0 * GB,app_1.getHeadroom().getMemory());
      LOG.info("here");
      // app_1 withdraws its demand (numContainers=0), leaving one active user.
      app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,0,true,priority,recordFactory)));
      assertEquals(1,a.getActiveUsersManager().getNumActiveUsers());
      // With demand gone, app_2's headroom recovers to 1GB.
      a.assignContainers(clusterResource,node_1);
      assertEquals(1 * GB,app_2.getHeadroom().getMemory());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies delay scheduling: node-local requests are deferred on
     * non-matching nodes (accumulating scheduling opportunities) until the
     * opportunity count exceeds the threshold, at which point an off-switch
     * assignment is made; matching nodes get node-local assignments and reset
     * the opportunity counter. A final phase checks rack-local assignment
     * with a node-locality-delay of 1.
     */
    @Test public void testLocalityScheduling() throws Exception {
      // Mock the queue and a spied application so allocate() calls can be verified.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
      String user_0="user_0";
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
      a.submitApplicationAttempt(app_0,user_0);
      // Three 8GB nodes, each on its own rack.
      String host_0="127.0.0.1";
      String rack_0="rack_0";
      FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,rack_0,0,8 * GB);
      String host_1="127.0.0.2";
      String rack_1="rack_1";
      FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,rack_1,0,8 * GB);
      String host_2="127.0.0.3";
      String rack_2="rack_2";
      FiCaSchedulerNode node_2=TestUtils.getMockNode(host_2,rack_2,0,8 * GB);
      final int numNodes=3;
      Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      // Requests target host_0/rack_0 and host_1/rack_1 plus 3 ANY containers;
      // node_2 matches none of the specific requests.
      Priority priority=TestUtils.createMockPriority(1);
      List app_0_requests_0=new ArrayList();
      app_0_requests_0.add(TestUtils.createResourceRequest(host_0,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,3,true,priority,recordFactory));
      app_0.updateResourceRequests(app_0_requests_0);
      CSAssignment assignment=null;
      // 1st-3rd tries on non-matching node_2: no allocation, opportunities grow.
      assignment=a.assignContainers(clusterResource,node_2);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(1,app_0.getSchedulingOpportunities(priority));
      assertEquals(3,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.NODE_LOCAL,assignment.getType()); // None->NODE_LOCAL
      assignment=a.assignContainers(clusterResource,node_2);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(2,app_0.getSchedulingOpportunities(priority));
      assertEquals(3,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.NODE_LOCAL,assignment.getType());
      assignment=a.assignContainers(clusterResource,node_2);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(3,app_0.getSchedulingOpportunities(priority));
      assertEquals(3,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.NODE_LOCAL,assignment.getType());
      // 4th try: delay exhausted, an OFF_SWITCH container is allocated on node_2.
      assignment=a.assignContainers(clusterResource,node_2);
      verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_2),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(4,app_0.getSchedulingOpportunities(priority));
      assertEquals(2,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.OFF_SWITCH,assignment.getType());
      // Matching node_0: NODE_LOCAL allocation, opportunity counter resets.
      assignment=a.assignContainers(clusterResource,node_0);
      verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority));
      assertEquals(1,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.NODE_LOCAL,assignment.getType());
      // Matching node_1: last required container, also NODE_LOCAL.
      assignment=a.assignContainers(clusterResource,node_1);
      verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority));
      assertEquals(0,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.NODE_LOCAL,assignment.getType());
      // New request set targeting host_1/rack_1; node_3 is on rack_1 but not host_1.
      app_0_requests_0.clear();
      app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory));
      app_0.updateResourceRequests(app_0_requests_0);
      assertEquals(2,app_0.getTotalRequiredResources(priority));
      String host_3="127.0.0.4"; // on rack_1
      FiCaSchedulerNode node_3=TestUtils.getMockNode(host_3,rack_1,0,8 * GB);
      // With a node-locality delay of 1, the first try on node_3 is skipped...
      doReturn(1).when(a).getNodeLocalityDelay();
      assignment=a.assignContainers(clusterResource,node_3);
      assertEquals(1,app_0.getSchedulingOpportunities(priority));
      assertEquals(2,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.NODE_LOCAL,assignment.getType());
      // ...and the second try yields a RACK_LOCAL allocation.
      assignment=a.assignContainers(clusterResource,node_3);
      verify(app_0).allocate(eq(NodeType.RACK_LOCAL),eq(node_3),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority));
      assertEquals(1,app_0.getTotalRequiredResources(priority));
      assertEquals(NodeType.RACK_LOCAL,assignment.getType());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testAppAttemptMetrics() throws Exception { LeafQueue a=stubLeafQueue((LeafQueue)queues.get(B)); final String user_0="user_0"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,1); AppAddedSchedulerEvent addAppEvent=new AppAddedSchedulerEvent(appAttemptId_0.getApplicationId(),a.getQueueName(),user_0); cs.handle(addAppEvent); AppAttemptAddedSchedulerEvent addAttemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId_0,false); cs.handle(addAttemptEvent); AppAttemptRemovedSchedulerEvent event=new AppAttemptRemovedSchedulerEvent(appAttemptId_0,RMAppAttemptState.FAILED,false); cs.handle(event); assertEquals(0,a.getMetrics().getAppsPending()); assertEquals(0,a.getMetrics().getAppsFailed()); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(0,2); FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_0,a,null,rmContext); a.submitApplicationAttempt(app_1,user_0); assertEquals(1,a.getMetrics().getAppsSubmitted()); assertEquals(1,a.getMetrics().getAppsPending()); event=new AppAttemptRemovedSchedulerEvent(appAttemptId_0,RMAppAttemptState.FINISHED,false); cs.handle(event); AppRemovedSchedulerEvent rEvent=new AppRemovedSchedulerEvent(appAttemptId_0.getApplicationId(),RMAppState.FINISHED); cs.handle(rEvent); assertEquals(1,a.getMetrics().getAppsSubmitted()); assertEquals(0,a.getMetrics().getAppsPending()); assertEquals(0,a.getMetrics().getAppsFailed()); assertEquals(1,a.getMetrics().getAppsCompleted()); QueueMetrics userMetrics=a.getMetrics().getUserMetrics(user_0); assertEquals(1,userMetrics.getAppsSubmitted()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies scheduling across two priorities within one application:
     * priority 1 requests (with locality preferences) must be satisfied
     * before priority 2 requests, and delay scheduling is tracked per
     * priority independently.
     */
    @Test public void testApplicationPriorityScheduling() throws Exception {
      // Mock the queue and a spied application.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
      String user_0="user_0";
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
      a.submitApplicationAttempt(app_0,user_0);
      // Three 8GB nodes, each on its own rack.
      String host_0="127.0.0.1";
      String rack_0="rack_0";
      FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,rack_0,0,8 * GB);
      String host_1="127.0.0.2";
      String rack_1="rack_1";
      FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,rack_1,0,8 * GB);
      String host_2="127.0.0.3";
      String rack_2="rack_2";
      FiCaSchedulerNode node_2=TestUtils.getMockNode(host_2,rack_2,0,8 * GB);
      final int numNodes=3;
      Resource clusterResource=Resources.createResource(numNodes * (8 * GB),1);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      // Priority 1: 1GB containers preferring host_0/rack_0 and host_1/rack_1.
      List app_0_requests_0=new ArrayList();
      Priority priority_1=TestUtils.createMockPriority(1);
      app_0_requests_0.add(TestUtils.createResourceRequest(host_0,1 * GB,1,true,priority_1,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority_1,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(host_1,1 * GB,1,true,priority_1,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority_1,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority_1,recordFactory));
      // Priority 2: one 2GB container preferring host_2/rack_2.
      Priority priority_2=TestUtils.createMockPriority(2);
      app_0_requests_0.add(TestUtils.createResourceRequest(host_2,2 * GB,1,true,priority_2,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_2,2 * GB,1,true,priority_2,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,2 * GB,1,true,priority_2,recordFactory));
      app_0.updateResourceRequests(app_0_requests_0);
      // 1st & 2nd tries on node_2: nothing allocated at either priority;
      // only priority 1 accumulates scheduling opportunities.
      a.assignContainers(clusterResource,node_2);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
      assertEquals(1,app_0.getSchedulingOpportunities(priority_1));
      assertEquals(2,app_0.getTotalRequiredResources(priority_1));
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
      assertEquals(1,app_0.getTotalRequiredResources(priority_2));
      a.assignContainers(clusterResource,node_2);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
      assertEquals(2,app_0.getSchedulingOpportunities(priority_1));
      assertEquals(2,app_0.getTotalRequiredResources(priority_1));
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
      assertEquals(1,app_0.getTotalRequiredResources(priority_2));
      // 3rd try: priority 1 falls back to an OFF_SWITCH allocation on node_2.
      a.assignContainers(clusterResource,node_2);
      verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_2),eq(priority_1),any(ResourceRequest.class),any(Container.class));
      assertEquals(3,app_0.getSchedulingOpportunities(priority_1));
      assertEquals(1,app_0.getTotalRequiredResources(priority_1));
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_2),eq(priority_2),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
      assertEquals(1,app_0.getTotalRequiredResources(priority_2));
      // node_0 matches a priority-1 host request: NODE_LOCAL, priority 1 satisfied.
      a.assignContainers(clusterResource,node_0);
      verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0),eq(priority_1),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority_1));
      assertEquals(0,app_0.getTotalRequiredResources(priority_1));
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_0),eq(priority_2),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority_2));
      assertEquals(1,app_0.getTotalRequiredResources(priority_2));
      // With priority 1 done, priority 2 gets an OFF_SWITCH container on node_1.
      a.assignContainers(clusterResource,node_1);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_1),eq(priority_1),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority_1));
      assertEquals(0,app_0.getTotalRequiredResources(priority_1));
      verify(app_0).allocate(eq(NodeType.OFF_SWITCH),eq(node_1),eq(priority_2),any(ResourceRequest.class),any(Container.class));
      assertEquals(1,app_0.getSchedulingOpportunities(priority_2));
      assertEquals(0,app_0.getTotalRequiredResources(priority_2));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies reservation behavior when a large (4GB) request cannot fit on
     * a partially-used 4GB node: the scheduler reserves the node, re-reserves
     * on subsequent passes, can satisfy the request on another node, and
     * finally unreserves (excess reservation) once the original reservation
     * becomes unnecessary.
     */
    @Test public void testReservationExchange() throws Exception {
      // Mock the queue; loosen caps so reservations, not limits, drive behavior.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
      a.setMaxCapacity(1.0f);
      a.setUserLimitFactor(10);
      // One app per user.
      final String user_0="user_0";
      final String user_1="user_1";
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
      a.submitApplicationAttempt(app_0,user_0);
      final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
      FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
      a.submitApplicationAttempt(app_1,user_1);
      // Two 4GB nodes.
      String host_0="127.0.0.1";
      FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
      String host_1="127.0.0.2";
      FiCaSchedulerNode node_1=TestUtils.getMockNode(host_1,DEFAULT_RACK,0,4 * GB);
      final int numNodes=3;
      Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(4 * GB,16));
      when(a.getMaximumAllocation()).thenReturn(Resources.createResource(4 * GB,16));
      when(a.getMinimumAllocationFactor()).thenReturn(0.25f); // 1G / 4G
      // Demand: app_0 wants 2x1GB, app_1 wants 1x4GB (fills a whole node).
      Priority priority=TestUtils.createMockPriority(1);
      app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
      app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,1,true,priority,recordFactory)));
      // app_0 takes 1GB, then a 2nd GB, on node_0.
      a.assignContainers(clusterResource,node_0);
      assertEquals(1 * GB,a.getUsedResources().getMemory());
      assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      a.assignContainers(clusterResource,node_0);
      assertEquals(2 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      // app_1's 4GB can't fit: it is reserved on node_0 (used = 2G consumed + 4G reserved).
      a.assignContainers(clusterResource,node_0);
      assertEquals(6 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(2 * GB,node_0.getUsedResource().getMemory());
      // Kill one of app_0's containers; the reservation persists and re-reserves.
      RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
      a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
      a.assignContainers(clusterResource,node_0);
      assertEquals(5 * GB,a.getUsedResources().getMemory());
      assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(1 * GB,node_0.getUsedResource().getMemory());
      assertEquals(1,app_1.getReReservations(priority));
      // Another pass on node_0: still blocked, re-reservation count grows.
      a.assignContainers(clusterResource,node_0);
      assertEquals(5 * GB,a.getUsedResources().getMemory());
      assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(1 * GB,node_0.getUsedResource().getMemory());
      assertEquals(2,app_1.getReReservations(priority));
      // node_1 is empty, so app_1's 4GB is allocated there; the node_0
      // reservation is still held at this point.
      a.assignContainers(clusterResource,node_1);
      assertEquals(9 * GB,a.getUsedResources().getMemory());
      assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(4 * GB,node_1.getUsedResource().getMemory());
      assertEquals(2,app_1.getReReservations(priority));
      // Kill app_0's last container; the next pass surfaces the now-excess
      // reservation so it can be released.
      rmContainer=app_0.getLiveContainers().iterator().next();
      a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
      CSAssignment assignment=a.assignContainers(clusterResource,node_0);
      assertEquals(8 * GB,a.getUsedResources().getMemory());
      assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(0 * GB,node_0.getUsedResource().getMemory());
      assertEquals(4 * GB,assignment.getExcessReservation().getContainer().getResource().getMemory());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies hard scheduling constraints: when only host/rack-specific
     * requests exist (no matching ANY count for a node), the scheduler must
     * not allocate on non-requested nodes, and allocations only happen where
     * the outstanding ANY request lines up with a locality match.
     */
    @Test public void testSchedulingConstraints() throws Exception {
      // Mock the queue and a spied application.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
      String user_0="user_0";
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=spy(new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext));
      a.submitApplicationAttempt(app_0,user_0);
      // Two nodes on rack_0 and one on rack_1, 8GB each.
      String host_0_0="127.0.0.1";
      String rack_0="rack_0";
      FiCaSchedulerNode node_0_0=TestUtils.getMockNode(host_0_0,rack_0,0,8 * GB);
      String host_0_1="127.0.0.2";
      FiCaSchedulerNode node_0_1=TestUtils.getMockNode(host_0_1,rack_0,0,8 * GB);
      String host_1_0="127.0.0.3";
      String rack_1="rack_1";
      FiCaSchedulerNode node_1_0=TestUtils.getMockNode(host_1_0,rack_1,0,8 * GB);
      final int numNodes=3;
      Resource clusterResource=Resources.createResource(numNodes * (8 * GB),numNodes * 16);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      // Host- and rack-level requests, then a single ANY container.
      Priority priority=TestUtils.createMockPriority(1);
      List app_0_requests_0=new ArrayList();
      app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(host_0_1,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_0,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0,1 * GB,1,true,priority,recordFactory));
      app_0_requests_0.add(TestUtils.createResourceRequest(rack_1,1 * GB,1,true,priority,recordFactory));
      app_0.updateResourceRequests(app_0_requests_0);
      app_0_requests_0.clear();
      app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory));
      app_0.updateResourceRequests(app_0_requests_0);
      // One ANY container outstanding: node_0_0 gets a NODE_LOCAL allocation.
      a.assignContainers(clusterResource,node_0_0);
      verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_0_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority));
      assertEquals(0,app_0.getTotalRequiredResources(priority));
      // No ANY containers left: node_1_0 must get nothing.
      a.assignContainers(clusterResource,node_1_0);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority));
      assertEquals(0,app_0.getTotalRequiredResources(priority));
      // Add one more ANY container.
      app_0_requests_0.clear();
      app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,1,true,priority,recordFactory));
      app_0.updateResourceRequests(app_0_requests_0);
      // node_0_1's host request was already consumed: no allocation, one
      // scheduling opportunity recorded.
      a.assignContainers(clusterResource,node_0_1);
      verify(app_0,never()).allocate(any(NodeType.class),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(1,app_0.getSchedulingOpportunities(priority));
      assertEquals(1,app_0.getTotalRequiredResources(priority));
      // node_1_0 still has its host request: NODE_LOCAL allocation succeeds.
      a.assignContainers(clusterResource,node_1_0);
      verify(app_0).allocate(eq(NodeType.NODE_LOCAL),eq(node_1_0),any(Priority.class),any(ResourceRequest.class),any(Container.class));
      assertEquals(0,app_0.getSchedulingOpportunities(priority));
      assertEquals(0,app_0.getTotalRequiredResources(priority));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies reservation accounting and queue metrics: a 4GB request that
     * cannot fit on a partially-used 4GB node is reserved (reflected in
     * reservedMB), and as app_0's containers complete the reservation is
     * eventually converted into a real allocation.
     */
    @Test public void testReservation() throws Exception {
      // Mock the queue, uncapped.
      LeafQueue a=stubLeafQueue((LeafQueue)queues.get(A));
      a.setMaxCapacity(1.0f);
      // One app per user.
      final String user_0="user_0";
      final String user_1="user_1";
      final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0);
      FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_0,a,mock(ActiveUsersManager.class),rmContext);
      a.submitApplicationAttempt(app_0,user_0);
      final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0);
      FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_1,a,mock(ActiveUsersManager.class),rmContext);
      a.submitApplicationAttempt(app_1,user_1);
      // A single 4GB node.
      String host_0="127.0.0.1";
      FiCaSchedulerNode node_0=TestUtils.getMockNode(host_0,DEFAULT_RACK,0,4 * GB);
      final int numNodes=2;
      Resource clusterResource=Resources.createResource(numNodes * (4 * GB),numNodes * 16);
      when(csContext.getNumClusterNodes()).thenReturn(numNodes);
      // Demand: app_0 wants 2x1GB, app_1 wants 1x4GB.
      Priority priority=TestUtils.createMockPriority(1);
      app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,1 * GB,2,true,priority,recordFactory)));
      app_1.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY,4 * GB,1,true,priority,recordFactory)));
      // app_0 takes two 1GB containers; metrics track allocated/reserved MB.
      a.assignContainers(clusterResource,node_0);
      assertEquals(1 * GB,a.getUsedResources().getMemory());
      assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,a.getMetrics().getReservedMB());
      assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
      assertEquals(0 * GB,a.getMetrics().getAvailableMB());
      a.assignContainers(clusterResource,node_0);
      assertEquals(2 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,a.getMetrics().getReservedMB());
      assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
      // app_1's 4GB doesn't fit: reserved on node_0 (used = 2G + 4G reserved).
      a.assignContainers(clusterResource,node_0);
      assertEquals(6 * GB,a.getUsedResources().getMemory());
      assertEquals(2 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(2 * GB,node_0.getUsedResource().getMemory());
      assertEquals(4 * GB,a.getMetrics().getReservedMB());
      assertEquals(2 * GB,a.getMetrics().getAllocatedMB());
      // Kill one of app_0's containers; the reservation remains.
      RMContainer rmContainer=app_0.getLiveContainers().iterator().next();
      a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
      a.assignContainers(clusterResource,node_0);
      assertEquals(5 * GB,a.getUsedResources().getMemory());
      assertEquals(1 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(1 * GB,node_0.getUsedResource().getMemory());
      assertEquals(4 * GB,a.getMetrics().getReservedMB());
      assertEquals(1 * GB,a.getMetrics().getAllocatedMB());
      // Kill app_0's last container; the reservation converts to a real 4GB allocation.
      rmContainer=app_0.getLiveContainers().iterator().next();
      a.completedContainer(clusterResource,app_0,node_0,rmContainer,ContainerStatus.newInstance(rmContainer.getContainerId(),ContainerState.COMPLETE,"",ContainerExitStatus.KILLED_BY_RESOURCEMANAGER),RMContainerEventType.KILL,null);
      a.assignContainers(clusterResource,node_0);
      assertEquals(4 * GB,a.getUsedResources().getMemory());
      assertEquals(0 * GB,app_0.getCurrentConsumption().getMemory());
      assertEquals(4 * GB,app_1.getCurrentConsumption().getMemory());
      assertEquals(0 * GB,app_1.getCurrentReservation().getMemory());
      assertEquals(4 * GB,node_0.getUsedResource().getMemory());
      assertEquals(0 * GB,a.getMetrics().getReservedMB());
      assertEquals(4 * GB,a.getMetrics().getAllocatedMB());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testActivateApplicationAfterQueueRefresh() throws Exception { LeafQueue e=stubLeafQueue((LeafQueue)queues.get(E)); final String user_e="user_e"; final ApplicationAttemptId appAttemptId_0=TestUtils.getMockApplicationAttemptId(0,0); FiCaSchedulerApp app_0=new FiCaSchedulerApp(appAttemptId_0,user_e,e,mock(ActiveUsersManager.class),rmContext); e.submitApplicationAttempt(app_0,user_e); final ApplicationAttemptId appAttemptId_1=TestUtils.getMockApplicationAttemptId(1,0); FiCaSchedulerApp app_1=new FiCaSchedulerApp(appAttemptId_1,user_e,e,mock(ActiveUsersManager.class),rmContext); e.submitApplicationAttempt(app_1,user_e); final ApplicationAttemptId appAttemptId_2=TestUtils.getMockApplicationAttemptId(2,0); FiCaSchedulerApp app_2=new FiCaSchedulerApp(appAttemptId_2,user_e,e,mock(ActiveUsersManager.class),rmContext); e.submitApplicationAttempt(app_2,user_e); assertEquals(2,e.activeApplications.size()); assertEquals(1,e.pendingApplications.size()); csConf.setDouble(CapacitySchedulerConfiguration.MAXIMUM_APPLICATION_MASTERS_RESOURCE_PERCENT,CapacitySchedulerConfiguration.DEFAULT_MAXIMUM_APPLICATIONMASTERS_RESOURCE_PERCENT * 2); Map newQueues=new HashMap(); CSQueue newRoot=CapacityScheduler.parseQueue(csContext,csConf,null,CapacitySchedulerConfiguration.ROOT,newQueues,queues,TestUtils.spyHook); queues=newQueues; root.reinitialize(newRoot,cs.getClusterResource()); assertEquals(3,e.activeApplications.size()); assertEquals(0,e.pendingApplications.size()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestQueueParsing

    InternalCallVerifier EqualityVerifier 
    @Test public void testQueueParsing() throws Exception { CapacitySchedulerConfiguration csConf=new CapacitySchedulerConfiguration(); setupQueueConfiguration(csConf); YarnConfiguration conf=new YarnConfiguration(csConf); CapacityScheduler capacityScheduler=new CapacityScheduler(); RMContextImpl rmContext=new RMContextImpl(null,null,null,null,null,null,new RMContainerTokenSecretManager(conf),new NMTokenSecretManagerInRM(conf),new ClientToAMTokenSecretManagerInRM(),null); capacityScheduler.setConf(conf); capacityScheduler.setRMContext(rmContext); capacityScheduler.init(conf); capacityScheduler.start(); capacityScheduler.reinitialize(conf,rmContext); CSQueue a=capacityScheduler.getQueue("a"); Assert.assertEquals(0.10,a.getAbsoluteCapacity(),DELTA); Assert.assertEquals(0.15,a.getAbsoluteMaximumCapacity(),DELTA); CSQueue b1=capacityScheduler.getQueue("b1"); Assert.assertEquals(0.2 * 0.5,b1.getAbsoluteCapacity(),DELTA); Assert.assertEquals("Parent B has no MAX_CAP",0.85,b1.getAbsoluteMaximumCapacity(),DELTA); CSQueue c12=capacityScheduler.getQueue("c12"); Assert.assertEquals(0.7 * 0.5 * 0.45,c12.getAbsoluteCapacity(),DELTA); Assert.assertEquals(0.7 * 0.55 * 0.7,c12.getAbsoluteMaximumCapacity(),DELTA); capacityScheduler.stop(); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestAllocationFileLoaderService

    InternalCallVerifier EqualityVerifier PublicFieldVerifier 
    /**
     * Writes a legacy-format allocation file and verifies that queue
     * min-resources, max-apps, ACLs, and preemption timeouts are parsed with
     * the expected values and defaults.
     *
     * NOTE(review): the out.println(...) arguments below appear to have had
     * their XML markup stripped (likely by a text-extraction step) — in the
     * upstream test each println emits an XML element such as
     * &lt;queue name=...&gt;. Restore against the original file before relying
     * on this block.
     */
    @Test public void testBackwardsCompatibleAllocationFileParsing() throws Exception {
      Configuration conf=new Configuration();
      conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE);
      AllocationFileLoaderService allocLoader=new AllocationFileLoaderService();
      // Write the allocation file to be parsed.
      PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE));
      out.println("");
      out.println("");
      out.println("");
      out.println("1024mb,0vcores");
      out.println("");
      out.println("");
      out.println("2048mb,0vcores");
      out.println("alice,bob admins");
      out.println("");
      out.println("");
      out.println("alice,bob admins");
      out.println("");
      out.println("");
      out.println("3");
      out.println("");
      out.println("");
      out.println("60");
      out.println("");
      out.println("15");
      out.println("5");
      out.println("");
      out.println("10");
      out.println("");
      out.println("120" + "");
      out.println("300");
      out.println("");
      out.close();
      // Load and parse.
      allocLoader.init(conf);
      ReloadListener confHolder=new ReloadListener();
      allocLoader.setReloadListener(confHolder);
      allocLoader.reloadAllocations();
      AllocationConfiguration queueConf=confHolder.allocConf;
      // Five leaf queues expected (including the default queue).
      assertEquals(5,queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size());
      assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
      // NOTE(review): duplicate of the assertion directly above.
      assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
      // Per-queue minimum resources.
      assertEquals(Resources.createResource(1024,0),queueConf.getMinResources("root.queueA"));
      assertEquals(Resources.createResource(2048,0),queueConf.getMinResources("root.queueB"));
      assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueC"));
      assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueD"));
      assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueE"));
      // Max running apps: queueD overrides to 3; others inherit the default of 15.
      assertEquals(15,queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
      assertEquals(15,queueConf.getQueueMaxApps("root.queueA"));
      assertEquals(15,queueConf.getQueueMaxApps("root.queueB"));
      assertEquals(15,queueConf.getQueueMaxApps("root.queueC"));
      assertEquals(3,queueConf.getQueueMaxApps("root.queueD"));
      assertEquals(15,queueConf.getQueueMaxApps("root.queueE"));
      // Per-user max apps: user1 overrides to 10; default is 5.
      assertEquals(10,queueConf.getUserMaxApps("user1"));
      assertEquals(5,queueConf.getUserMaxApps("user2"));
      // ACLs: queueA unset (" " = nobody), queueB/queueC explicitly configured.
      assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.ADMINISTER_QUEUE).getAclString());
      assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.SUBMIT_APPLICATIONS).getAclString());
      assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueB",QueueACL.ADMINISTER_QUEUE).getAclString());
      assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueC",QueueACL.SUBMIT_APPLICATIONS).getAclString());
      // Min-share preemption timeouts (ms): default 120s; queueE overrides to 60s.
      assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root." + YarnConfiguration.DEFAULT_QUEUE_NAME));
      assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA"));
      assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueB"));
      assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueC"));
      assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueD"));
      // NOTE(review): root.queueA checked a second time here — presumably a
      // copy-paste duplicate; verify against the upstream test.
      assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA"));
      assertEquals(60000,queueConf.getMinSharePreemptionTimeout("root.queueE"));
      // Fair-share preemption timeout (ms): 300s.
      assertEquals(300000,queueConf.getFairSharePreemptionTimeout());
    }

    InternalCallVerifier EqualityVerifier PublicFieldVerifier 
    /**
     * Checks that a placement policy is synthesized from scheduler
     * configuration flags when the allocation file does not declare one:
     * with undeclared pools and user-as-default-queue both disabled, the
     * loader must produce a two-rule chain of Specified (create=false)
     * followed by Default.
     */
    @Test
    public void testSimplePlacementPolicyFromConf() throws Exception {
      Configuration conf = new Configuration();
      conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
      conf.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS, false);
      conf.setBoolean(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, false);

      // Write the allocation file (payload appears stripped in this copy).
      PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
      out.println("");
      out.println("");
      out.println("");
      out.close();

      AllocationFileLoaderService loader = new AllocationFileLoaderService();
      loader.init(conf);
      ReloadListener listener = new ReloadListener();
      loader.setReloadListener(listener);
      loader.reloadAllocations();

      AllocationConfiguration allocConf = listener.allocConf;
      QueuePlacementPolicy policy = allocConf.getPlacementPolicy();
      List rules = policy.getRules();
      assertEquals(2, rules.size());
      assertEquals(QueuePlacementRule.Specified.class, rules.get(0).getClass());
      assertEquals(false, rules.get(0).create);
      assertEquals(QueuePlacementRule.Default.class, rules.get(1).getClass());
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * Full-format allocation file parsing: min resources, max apps, max AM
     * share, ACLs, per-queue scheduling policies (fair/drf), parent/leaf
     * queue classification, and min-share/fair-share preemption timeouts.
     *
     * NOTE(review): the out.println payloads appear to have been stripped
     * (mostly empty strings), so the written file no longer matches the
     * assertions — restore the original queue XML before relying on this
     * test. NOTE(review): getMinSharePreemptionTimeout("root.queueA") is
     * asserted twice; the second occurrence looks like a copy/paste
     * duplicate — confirm the intended queue.
     */
    @Test public void testAllocationFileParsing() throws Exception { Configuration conf=new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); AllocationFileLoaderService allocLoader=new AllocationFileLoaderService(); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println("alice,bob admins"); out.println("fair"); out.println(""); out.println(""); out.println("alice,bob admins"); out.println(""); out.println(""); out.println("3"); out.println("0.4"); out.println(""); out.println(""); out.println("60"); out.println(""); out.println(""); out.println(""); out.println(""); out.println(" "); out.println(" "); out.println(""); out.println("15"); out.println("5"); out.println("0.5f"); out.println(""); out.println("10"); out.println(""); out.println("120" + ""); out.println("300"); out.println("drf"); out.println(""); out.close(); allocLoader.init(conf); ReloadListener confHolder=new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration queueConf=confHolder.allocConf; assertEquals(6,queueConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertEquals(Resources.createResource(0),queueConf.getMinResources("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(0),queueConf.getMinResources("root." 
    // Per-queue min resources and max running apps.
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(Resources.createResource(1024,0),queueConf.getMinResources("root.queueA")); assertEquals(Resources.createResource(2048,0),queueConf.getMinResources("root.queueB")); assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueC")); assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueD")); assertEquals(Resources.createResource(0),queueConf.getMinResources("root.queueE")); assertEquals(15,queueConf.getQueueMaxApps("root." + YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(15,queueConf.getQueueMaxApps("root.queueA")); assertEquals(15,queueConf.getQueueMaxApps("root.queueB")); assertEquals(15,queueConf.getQueueMaxApps("root.queueC")); assertEquals(3,queueConf.getQueueMaxApps("root.queueD")); assertEquals(15,queueConf.getQueueMaxApps("root.queueE")); assertEquals(10,queueConf.getUserMaxApps("user1")); assertEquals(5,queueConf.getUserMaxApps("user2")); assertEquals(.5f,queueConf.getQueueMaxAMShare("root." 
    // Max AM share (default .5, queueD overridden to .4) and queue ACLs.
+ YarnConfiguration.DEFAULT_QUEUE_NAME),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueA"),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueB"),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueC"),0.01); assertEquals(.4f,queueConf.getQueueMaxAMShare("root.queueD"),0.01); assertEquals(.5f,queueConf.getQueueMaxAMShare("root.queueE"),0.01); assertEquals("*",queueConf.getQueueAcl("root",QueueACL.ADMINISTER_QUEUE).getAclString()); assertEquals("*",queueConf.getQueueAcl("root",QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.ADMINISTER_QUEUE).getAclString()); assertEquals(" ",queueConf.getQueueAcl("root.queueA",QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueB",QueueACL.ADMINISTER_QUEUE).getAclString()); assertEquals("alice,bob admins",queueConf.getQueueAcl("root.queueC",QueueACL.SUBMIT_APPLICATIONS).getAclString()); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root." 
    // Preemption timeouts, parent/leaf classification, scheduling policies.
+ YarnConfiguration.DEFAULT_QUEUE_NAME)); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueB")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueC")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueD")); assertEquals(120000,queueConf.getMinSharePreemptionTimeout("root.queueA")); assertEquals(60000,queueConf.getMinSharePreemptionTimeout("root.queueE")); assertEquals(300000,queueConf.getFairSharePreemptionTimeout()); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueF")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.PARENT).contains("root.queueG")); assertTrue(queueConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueG.queueH")); assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root").getName()); assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.queueA").getName()); assertEquals(FairSharePolicy.NAME,queueConf.getSchedulingPolicy("root.queueB").getName()); assertEquals(DominantResourceFairnessPolicy.NAME,queueConf.getSchedulingPolicy("root.newqueue").getName()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * Verifies live reload of the allocation file: an initial file is loaded
     * synchronously, then the file is rewritten on disk, the MockClock is
     * advanced past ALLOC_RELOAD_WAIT_MS, the loader's background thread is
     * started, and the test polls until the ReloadListener observes the new
     * configuration (3 placement rules, queueB only).
     *
     * NOTE(review): the out.println payloads appear stripped (whitespace /
     * empty strings) — restore the original queue XML before relying on
     * this test.
     */
    @Test(timeout=10000) public void testReload() throws Exception { PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(" "); out.println(" 1"); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(""); out.close(); MockClock clock=new MockClock(); Configuration conf=new Configuration(); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); AllocationFileLoaderService allocLoader=new AllocationFileLoaderService(clock); allocLoader.reloadIntervalMs=5; allocLoader.init(conf); ReloadListener confHolder=new ReloadListener(); allocLoader.setReloadListener(confHolder); allocLoader.reloadAllocations(); AllocationConfiguration allocConf=confHolder.allocConf; QueuePlacementPolicy policy=allocConf.getPlacementPolicy(); List rules=policy.getRules(); assertEquals(1,rules.size()); assertEquals(QueuePlacementRule.Default.class,rules.get(0).getClass()); assertEquals(1,allocConf.getQueueMaxApps("root.queueA")); assertEquals(2,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueA")); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB")); confHolder.allocConf=null; out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(" "); out.println(" 3"); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(" "); out.println(""); out.close(); clock.tick(System.currentTimeMillis() + AllocationFileLoaderService.ALLOC_RELOAD_WAIT_MS + 10000); allocLoader.start(); while (confHolder.allocConf == null) { Thread.sleep(20); } allocConf=confHolder.allocConf; policy=allocConf.getPlacementPolicy(); rules=policy.getRules(); assertEquals(3,rules.size()); assertEquals(QueuePlacementRule.Specified.class,rules.get(0).getClass()); 
    // After reload: Specified -> NestedUserQueue(PrimaryGroup) -> Default.
assertEquals(QueuePlacementRule.NestedUserQueue.class,rules.get(1).getClass()); assertEquals(QueuePlacementRule.PrimaryGroup.class,((NestedUserQueue)(rules.get(1))).nestedRule.getClass()); assertEquals(QueuePlacementRule.Default.class,rules.get(2).getClass()); assertEquals(3,allocConf.getQueueMaxApps("root.queueB")); assertEquals(1,allocConf.getConfiguredQueues().get(FSQueueType.LEAF).size()); assertTrue(allocConf.getConfiguredQueues().get(FSQueueType.LEAF).contains("root.queueB")); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * The allocation file setting may name a classpath resource rather than
     * a filesystem path; the loader must resolve it to an existing file.
     */
    @Test
    public void testGetAllocationFileFromClasspath() {
      Configuration configuration = new Configuration();
      configuration.set(FairSchedulerConfiguration.ALLOCATION_FILE,
          "test-fair-scheduler.xml");
      AllocationFileLoaderService loader = new AllocationFileLoaderService();
      File resolved = loader.getAllocationFile(configuration);
      assertEquals("test-fair-scheduler.xml", resolved.getName());
      assertTrue(resolved.exists());
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestFSAppAttempt

    IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises opportunity-count-based delay scheduling. With thresholds
     * of .5 (node) and .6 (rack) over 10 nodes, the allowed locality level
     * relaxes from NODE_LOCAL to RACK_LOCAL after 6 missed opportunities
     * and to OFF_SWITCH after 7 more, and resetAllowedLocalityLevel() plus
     * resetSchedulingOpportunities() restore the initial state.
     */
    @Test
    public void testDelayScheduling() {
      FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
      Priority priority = Mockito.mock(Priority.class);
      Mockito.when(priority.getPriority()).thenReturn(1);
      double nodeThreshold = .5;
      double rackThreshold = .6;
      ApplicationAttemptId attemptId = createAppAttemptId(1, 1);
      RMContext context = resourceManager.getRMContext();
      FSAppAttempt app =
          new FSAppAttempt(scheduler, attemptId, "user1", queue, null, context);

      // Initial level is NODE_LOCAL.
      assertEquals(NodeType.NODE_LOCAL,
          app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));

      // Five missed opportunities stay below the node threshold.
      for (int i = 0; i < 5; i++) {
        app.addSchedulingOpportunity(priority);
        assertEquals(NodeType.NODE_LOCAL,
            app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));
      }
      // The sixth crosses it: relax to RACK_LOCAL.
      app.addSchedulingOpportunity(priority);
      assertEquals(NodeType.RACK_LOCAL,
          app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));

      // Reset restores NODE_LOCAL and clears the opportunity count.
      app.resetAllowedLocalityLevel(priority, NodeType.NODE_LOCAL);
      app.resetSchedulingOpportunities(priority);
      assertEquals(NodeType.NODE_LOCAL,
          app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));

      // Same relaxation pattern repeats after the reset.
      for (int i = 0; i < 5; i++) {
        app.addSchedulingOpportunity(priority);
        assertEquals(NodeType.NODE_LOCAL,
            app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));
      }
      app.addSchedulingOpportunity(priority);
      assertEquals(NodeType.RACK_LOCAL,
          app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));

      // Six more misses keep RACK_LOCAL; the seventh relaxes to OFF_SWITCH.
      for (int i = 0; i < 6; i++) {
        app.addSchedulingOpportunity(priority);
        assertEquals(NodeType.RACK_LOCAL,
            app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));
      }
      app.addSchedulingOpportunity(priority);
      assertEquals(NodeType.OFF_SWITCH,
          app.getAllowedLocalityLevel(priority, 10, nodeThreshold, rackThreshold));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * With negative locality thresholds the app is allowed to schedule
     * OFF_SWITCH immediately, without accumulating any opportunities.
     */
    @Test
    public void testLocalityLevelWithoutDelays() {
      FSLeafQueue queue = Mockito.mock(FSLeafQueue.class);
      Priority priority = Mockito.mock(Priority.class);
      Mockito.when(priority.getPriority()).thenReturn(1);
      RMContext context = resourceManager.getRMContext();
      ApplicationAttemptId attemptId = createAppAttemptId(1, 1);
      FSAppAttempt app =
          new FSAppAttempt(scheduler, attemptId, "user1", queue, null, context);
      assertEquals(NodeType.OFF_SWITCH,
          app.getAllowedLocalityLevel(priority, 10, -1.0, -1.0));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Time-based delay scheduling (continuous scheduling mode): with a
     * 5000 ms node delay and 6000 ms rack delay, the allowed locality
     * relaxes NODE_LOCAL -> RACK_LOCAL -> OFF_SWITCH as the MockClock
     * advances, and resetting restarts the window from the current time.
     */
    @Test
    public void testDelaySchedulingForContinuousScheduling()
        throws InterruptedException {
      FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue("queue", true);
      Priority priority = Mockito.mock(Priority.class);
      Mockito.when(priority.getPriority()).thenReturn(1);

      MockClock clock = new MockClock();
      scheduler.setClock(clock);

      long nodeDelayMs = 5 * 1000L;
      long rackDelayMs = 6 * 1000L;

      RMContext context = resourceManager.getRMContext();
      ApplicationAttemptId attemptId = createAppAttemptId(1, 1);
      FSAppAttempt app =
          new FSAppAttempt(scheduler, attemptId, "user1", queue, null, context);

      // Within the node-locality window.
      assertEquals(NodeType.NODE_LOCAL, app.getAllowedLocalityLevelByTime(
          priority, nodeDelayMs, rackDelayMs, clock.getTime()));
      clock.tick(4);
      assertEquals(NodeType.NODE_LOCAL, app.getAllowedLocalityLevelByTime(
          priority, nodeDelayMs, rackDelayMs, clock.getTime()));
      // Past the node delay, still within the rack delay.
      clock.tick(2);
      assertEquals(NodeType.RACK_LOCAL, app.getAllowedLocalityLevelByTime(
          priority, nodeDelayMs, rackDelayMs, clock.getTime()));

      // Reset restarts the timer at the current clock value.
      app.resetAllowedLocalityLevel(priority, NodeType.NODE_LOCAL);
      app.resetSchedulingOpportunities(priority, clock.getTime());
      assertEquals(NodeType.NODE_LOCAL, app.getAllowedLocalityLevelByTime(
          priority, nodeDelayMs, rackDelayMs, clock.getTime()));
      clock.tick(6);
      assertEquals(NodeType.RACK_LOCAL, app.getAllowedLocalityLevelByTime(
          priority, nodeDelayMs, rackDelayMs, clock.getTime()));
      // Past both delays: anything goes.
      clock.tick(7);
      assertEquals(NodeType.OFF_SWITCH, app.getAllowedLocalityLevelByTime(
          priority, nodeDelayMs, rackDelayMs, clock.getTime()));
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestFairScheduler

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Basic DRF sanity check on one 8192 MB / 5-core node: app1 asks for
     * 2048 MB x1 core containers, app2 for 1024 MB x2 cores. Under the
     * dominant-resource policy the node heartbeats hand out containers in
     * the order app1, app2, app1.
     */
    @Test
    public void testBasicDRFAssignment() throws Exception {
      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());

      RMNode rmNode = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5));
      NodeAddedSchedulerEvent added = new NodeAddedSchedulerEvent(rmNode);
      scheduler.handle(added);

      ApplicationAttemptId attempt1 =
          createSchedulingRequest(2048, 1, "queue1", "user1", 2);
      FSAppAttempt app1 = scheduler.getSchedulerApp(attempt1);
      ApplicationAttemptId attempt2 =
          createSchedulingRequest(1024, 2, "queue1", "user1", 2);
      FSAppAttempt app2 = scheduler.getSchedulerApp(attempt2);

      // Switch queue1 to the dominant-resource-fairness policy.
      DominantResourceFairnessPolicy policy = new DominantResourceFairnessPolicy();
      policy.initialize(scheduler.getClusterResource());
      scheduler.getQueueManager().getQueue("queue1").setPolicy(policy);
      scheduler.update();

      NodeUpdateSchedulerEvent heartbeat = new NodeUpdateSchedulerEvent(rmNode);
      scheduler.handle(heartbeat);
      Assert.assertEquals(1, app1.getLiveContainers().size());
      Assert.assertEquals(0, app2.getLiveContainers().size());

      scheduler.handle(heartbeat);
      Assert.assertEquals(1, app1.getLiveContainers().size());
      Assert.assertEquals(1, app2.getLiveContainers().size());

      scheduler.handle(heartbeat);
      Assert.assertEquals(2, app1.getLiveContainers().size());
      Assert.assertEquals(1, app2.getLiveContainers().size());
    }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Every FairScheduler tunable set on the Configuration must be picked
     * up by init()/reinitialize() and visible on the scheduler afterwards.
     */
    @Test(timeout=2000)
    public void testLoadConfigurationOnInitialize() throws IOException {
      // Scheduler behavior knobs.
      conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
      conf.setInt(FairSchedulerConfiguration.MAX_ASSIGN, 3);
      conf.setBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT, true);
      conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE, .5);
      conf.setDouble(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, .7);
      conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
      conf.setInt(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS, 10);
      conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_RACK_MS, 5000);
      conf.setInt(FairSchedulerConfiguration.LOCALITY_DELAY_NODE_MS, 5000);
      // Allocation sizing knobs.
      conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
      conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
      conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 128);

      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());

      Assert.assertEquals(true, scheduler.assignMultiple);
      Assert.assertEquals(3, scheduler.maxAssign);
      Assert.assertEquals(true, scheduler.sizeBasedWeight);
      Assert.assertEquals(.5, scheduler.nodeLocalityThreshold, .01);
      Assert.assertEquals(.7, scheduler.rackLocalityThreshold, .01);
      Assert.assertTrue("The continuous scheduling should be enabled",
          scheduler.continuousSchedulingEnabled);
      Assert.assertEquals(10, scheduler.continuousSchedulingSleepMs);
      Assert.assertEquals(5000, scheduler.nodeLocalityDelayMs);
      Assert.assertEquals(5000, scheduler.rackLocalityDelayMs);
      Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemory());
      Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemory());
      Assert.assertEquals(128, scheduler.getIncrementResourceCapability().getMemory());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * When a container is killed by preemption (second warnOrKillContainer
     * after WAIT_TIME_BEFORE_KILL elapses), the resource requests it was
     * satisfying must be restored to the app so the container can be
     * re-obtained on a later heartbeat.
     *
     * NOTE(review): generic type parameters on the List declarations appear
     * to have been stripped in this copy (raw List); restore e.g.
     * List&lt;ResourceRequest&gt; before compiling. The final
     * Assert.assertTrue(containers.size() == 1) would read better as
     * assertEquals(1, containers.size()).
     */
    @Test(timeout=5000) public void testRecoverRequestAfterPreemption() throws Exception { conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,10); MockClock clock=new MockClock(); scheduler.setClock(clock); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); Priority priority=Priority.newInstance(20); String host="127.0.0.1"; int GB=1024; RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * 1024,4),0,host); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); scheduler.handle(nodeEvent); List ask=new ArrayList(); ResourceRequest nodeLocalRequest=createResourceRequest(GB,1,host,priority.getPriority(),1,true); ResourceRequest rackLocalRequest=createResourceRequest(GB,1,node.getRackName(),priority.getPriority(),1,true); ResourceRequest offRackRequest=createResourceRequest(GB,1,ResourceRequest.ANY,priority.getPriority(),1,true); ask.add(nodeLocalRequest); ask.add(rackLocalRequest); ask.add(offRackRequest); ApplicationAttemptId appAttemptId=createSchedulingRequest("queueA","user1",ask); scheduler.update(); NodeUpdateSchedulerEvent nodeUpdate=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeUpdate); assertEquals(1,scheduler.getSchedulerApp(appAttemptId).getLiveContainers().size()); FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId); Assert.assertNull(app.getResourceRequest(priority,host)); ContainerId containerId1=ContainerId.newInstance(appAttemptId,1); RMContainer rmContainer=app.getRMContainer(containerId1); scheduler.warnOrKillContainer(rmContainer); clock.tick(5); scheduler.warnOrKillContainer(rmContainer); List requests=rmContainer.getResourceRequests(); Assert.assertEquals(3,requests.size()); for ( ResourceRequest request : requests) { Assert.assertEquals(1,app.getResourceRequest(priority,request.getResourceName()).getNumContainers()); } scheduler.update(); scheduler.handle(nodeUpdate); List 
    // The recovered request is satisfied again: exactly one new container.
containers=scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,null).getContainers(); Assert.assertTrue(containers.size() == 1); }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * End-to-end preemption choice test across four equally-weighted queues.
     * queueA (FIFO) and queueB (fair) each run two apps with two containers;
     * queueC and default are then starved so the scheduler must preempt.
     * The newest, lowest-priority (priority=4) containers of app2 and app4
     * must be chosen, while app1 and app3 keep their containers.
     *
     * Fixes a copy/paste bug: the "App4 should have container to be
     * preempted" assertion previously inspected app2's containers twice
     * instead of app4's, so app4's preemption marking was never verified.
     *
     * NOTE(review): the allocation-file payload written below appears to
     * have been stripped to empty strings; restore the original queue XML
     * (four queues with weight .25 each) before relying on this test.
     */
    @Test(timeout=5000)
    public void testChoiceOfPreemptedContainers() throws Exception {
      conf.setLong(FairSchedulerConfiguration.PREEMPTION_INTERVAL, 5000);
      conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL, 10000);
      // NOTE(review): the "+ \".allocation.file\"" suffix on the key looks
      // suspicious — sibling tests use ALLOCATION_FILE directly; confirm.
      conf.set(FairSchedulerConfiguration.ALLOCATION_FILE + ".allocation.file", ALLOC_FILE);
      conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE, "false");

      MockClock clock = new MockClock();
      scheduler.setClock(clock);

      PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
      out.println("");
      out.println("");
      out.println("");
      out.println(".25");
      out.println("");
      out.println("");
      out.println(".25");
      out.println("");
      out.println("");
      out.println(".25");
      out.println("");
      out.println("");
      out.println(".25");
      out.println("");
      out.println("");
      out.close();

      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());

      // Two 4096 MB / 4-core nodes.
      RMNode node1 =
          MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 1, "127.0.0.1");
      NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
      scheduler.handle(nodeEvent1);
      RMNode node2 =
          MockNodes.newNodeInfo(1, Resources.createResource(4 * 1024, 4), 2, "127.0.0.2");
      NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
      scheduler.handle(nodeEvent2);

      // Two apps per queue; each app requests one container at a lower and
      // one at a higher priority value.
      ApplicationAttemptId app1 = createSchedulingRequest(1 * 1024, 1, "queueA", "user1", 1, 1);
      createSchedulingRequestExistingApplication(1 * 1024, 1, 2, app1);
      ApplicationAttemptId app2 = createSchedulingRequest(1 * 1024, 1, "queueA", "user1", 1, 3);
      createSchedulingRequestExistingApplication(1 * 1024, 1, 4, app2);
      ApplicationAttemptId app3 = createSchedulingRequest(1 * 1024, 1, "queueB", "user1", 1, 1);
      createSchedulingRequestExistingApplication(1 * 1024, 1, 2, app3);
      ApplicationAttemptId app4 = createSchedulingRequest(1 * 1024, 1, "queueB", "user1", 1, 3);
      createSchedulingRequestExistingApplication(1 * 1024, 1, 4, app4);
      scheduler.update();

      scheduler.getQueueManager().getLeafQueue("queueA", true)
          .setPolicy(SchedulingPolicy.parse("fifo"));
      scheduler.getQueueManager().getLeafQueue("queueB", true)
          .setPolicy(SchedulingPolicy.parse("fair"));

      // Heartbeat both nodes until every request is placed.
      NodeUpdateSchedulerEvent nodeUpdate1 = new NodeUpdateSchedulerEvent(node1);
      NodeUpdateSchedulerEvent nodeUpdate2 = new NodeUpdateSchedulerEvent(node2);
      for (int i = 0; i < 4; i++) {
        scheduler.handle(nodeUpdate1);
        scheduler.handle(nodeUpdate2);
      }
      assertEquals(2, scheduler.getSchedulerApp(app1).getLiveContainers().size());
      assertEquals(2, scheduler.getSchedulerApp(app2).getLiveContainers().size());
      assertEquals(2, scheduler.getSchedulerApp(app3).getLiveContainers().size());
      assertEquals(2, scheduler.getSchedulerApp(app4).getLiveContainers().size());

      // Starve queueC and default so 2 GB must be preempted.
      createSchedulingRequest(1 * 1024, 1, "queueC", "user1", 1, 1);
      createSchedulingRequest(1 * 1024, 1, "queueC", "user1", 1, 1);
      createSchedulingRequest(1 * 1024, 1, "default", "user1", 1, 1);
      createSchedulingRequest(1 * 1024, 1, "default", "user1", 1, 1);
      scheduler.update();

      scheduler.preemptResources(Resources.createResource(2 * 1024));
      // app1 and app3 keep their containers...
      assertEquals(2, scheduler.getSchedulerApp(app1).getLiveContainers().size());
      assertEquals(2, scheduler.getSchedulerApp(app3).getLiveContainers().size());
      // ...while app2 and app4 each get a live container marked for preemption.
      assertTrue("App2 should have container to be preempted",
          !Collections.disjoint(
              scheduler.getSchedulerApp(app2).getLiveContainers(),
              scheduler.getSchedulerApp(app2).getPreemptionContainers()));
      // BUG FIX: this assertion previously checked app2 again instead of app4.
      assertTrue("App4 should have container to be preempted",
          !Collections.disjoint(
              scheduler.getSchedulerApp(app4).getLiveContainers(),
              scheduler.getSchedulerApp(app4).getPreemptionContainers()));

      // After the kill-wait elapses, the marked containers are killed.
      clock.tick(15);
      scheduler.preemptResources(Resources.createResource(2 * 1024));
      assertEquals(1, scheduler.getSchedulerApp(app2).getLiveContainers().size());
      assertEquals(1, scheduler.getSchedulerApp(app4).getLiveContainers().size());

      // The priority=4 containers must be the ones that are gone.
      Set<RMContainer> set = new HashSet<RMContainer>();
      for (RMContainer container : scheduler.getSchedulerApp(app2).getLiveContainers()) {
        if (container.getAllocatedPriority().getPriority() == 4) {
          set.add(container);
        }
      }
      for (RMContainer container : scheduler.getSchedulerApp(app4).getLiveContainers()) {
        if (container.getAllocatedPriority().getPriority() == 4) {
          set.add(container);
        }
      }
      assertTrue("Containers with priority=4 in app2 and app4 should be " + "preempted.",
          set.isEmpty());

      // Another preemption round: app2 ends up empty, app3 drops to one.
      scheduler.preemptResources(Resources.createResource(2 * 1024));
      clock.tick(15);
      scheduler.preemptResources(Resources.createResource(2 * 1024));
      assertEquals(2, scheduler.getSchedulerApp(app1).getLiveContainers().size());
      assertEquals(0, scheduler.getSchedulerApp(app2).getLiveContainers().size());
      assertEquals(1, scheduler.getSchedulerApp(app3).getLiveContainers().size());
      assertEquals(1, scheduler.getSchedulerApp(app4).getLiveContainers().size());

      // Shares are satisfied now; no further containers may be marked.
      scheduler.preemptResources(Resources.createResource(2 * 1024));
      assertTrue("App1 should have no container to be preempted",
          scheduler.getSchedulerApp(app1).getPreemptionContainers().isEmpty());
      assertTrue("App2 should have no container to be preempted",
          scheduler.getSchedulerApp(app2).getPreemptionContainers().isEmpty());
      assertTrue("App3 should have no container to be preempted",
          scheduler.getSchedulerApp(app3).getPreemptionContainers().isEmpty());
      assertTrue("App4 should have no container to be preempted",
          scheduler.getSchedulerApp(app4).getPreemptionContainers().isEmpty());
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * An app submitted to a queue whose max-running-apps is 0 sits in the
     * non-runnable set; moving it to an uncapped queue must promote it to
     * runnable in the destination and in the root queue's counters.
     */
    @Test
    public void testMoveMakesAppRunnable() throws Exception {
      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());

      QueueManager queueManager = scheduler.getQueueManager();
      FSLeafQueue source = queueManager.getLeafQueue("queue1", true);
      FSLeafQueue destination = queueManager.getLeafQueue("queue2", true);

      // Cap queue1 at zero running apps so the new app cannot start there.
      scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1", 0);

      ApplicationAttemptId attemptId =
          createSchedulingRequest(1024, 1, "queue1", "user1", 3);
      FSAppAttempt attempt = scheduler.getSchedulerApp(attemptId);
      assertTrue(source.getNonRunnableAppSchedulables().contains(attempt));

      scheduler.moveApplication(attemptId.getApplicationId(), "queue2");
      assertFalse(source.getNonRunnableAppSchedulables().contains(attempt));
      assertFalse(destination.getNonRunnableAppSchedulables().contains(attempt));
      assertTrue(destination.getRunnableAppSchedulables().contains(attempt));
      assertEquals(1, destination.getNumRunnableApps());
      assertEquals(1, queueManager.getRootQueue().getNumRunnableApps());
    }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    /**
     * An app whose only relaxable request is node-local on node1 must never
     * be placed by node2 heartbeats; once the request set is updated to
     * zero out the node request and allow ANY, node2 may satisfy it.
     */
    @Test
    public void testCancelStrictLocality() throws IOException {
      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());

      RMNode node1 =
          MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
      scheduler.handle(new NodeAddedSchedulerEvent(node1));
      RMNode node2 =
          MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
      scheduler.handle(new NodeAddedSchedulerEvent(node2));

      ApplicationAttemptId attemptId =
          createSchedulingRequest(1024, "queue1", "user1", 0);
      // Final boolean flag is false on the rack/ANY requests — presumably
      // this is what makes the node request strict; confirm against
      // createResourceRequest.
      ResourceRequest onNode =
          createResourceRequest(1024, node1.getHostName(), 1, 1, true);
      ResourceRequest onRack = createResourceRequest(1024, "rack1", 1, 1, false);
      ResourceRequest anywhere =
          createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false);
      createSchedulingRequestExistingApplication(onNode, attemptId);
      createSchedulingRequestExistingApplication(onRack, attemptId);
      createSchedulingRequestExistingApplication(anywhere, attemptId);
      scheduler.update();

      NodeUpdateSchedulerEvent node2Update = new NodeUpdateSchedulerEvent(node2);
      FSAppAttempt app = scheduler.getSchedulerApp(attemptId);
      // node2 heartbeats never satisfy the node1-only request.
      for (int i = 0; i < 10; i++) {
        scheduler.handle(node2Update);
        assertEquals(0, app.getLiveContainers().size());
      }

      // Cancel the node/rack requests (numContainers=0) and allow ANY.
      List relaxed = Arrays.asList(
          createResourceRequest(1024, node1.getHostName(), 1, 0, true),
          createResourceRequest(1024, "rack1", 1, 0, true),
          createResourceRequest(1024, ResourceRequest.ANY, 1, 1, true));
      scheduler.allocate(attemptId, relaxed, new ArrayList(), null, null);
      scheduler.handle(node2Update);
      assertEquals(1, app.getLiveContainers().size());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies queue-level maxAMShare enforcement: on a 20480 MB node with an
     * AM-share cap (configured 0.2 of the queue's fair share), AMs are admitted
     * only while queue1's aggregate AM usage stays within the cap; later AMs
     * wait until earlier ones finish, and usage returns to 0 when all finish.
     * NOTE(review): the out.println("") calls appear to have lost their XML
     * allocation-file payload in this copy — confirm against original source.
     */
    @Test public void testQueueMaxAMShare() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("0.2"); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(20480,20),0,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.update(); FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true); assertEquals("Queue queue1's fair share should be 0",0,queue1.getFairShare().getMemory()); createSchedulingRequest(1 * 1024,"root.default","user1"); scheduler.update(); scheduler.handle(updateEvent); Resource amResource1=Resource.newInstance(1024,1); Resource amResource2=Resource.newInstance(2048,2); Resource amResource3=Resource.newInstance(1860,2); int amPriority=RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority(); ApplicationAttemptId attId1=createAppAttemptId(1,1); createApplicationWithAMResource(attId1,"queue1","user1",amResource1); createSchedulingRequestExistingApplication(1024,1,amPriority,attId1); FSAppAttempt app1=scheduler.getSchedulerApp(attId1); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application1's AM requests 1024 MB memory",1024,app1.getAMResource().getMemory()); assertEquals("Application1's AM should be running",1,app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 1024 MB memory",1024,queue1.getAmResourceUsage().getMemory()); ApplicationAttemptId attId2=createAppAttemptId(2,1); createApplicationWithAMResource(attId2,"queue1","user1",amResource1); createSchedulingRequestExistingApplication(1024,1,amPriority,attId2); FSAppAttempt 
app2=scheduler.getSchedulerApp(attId2); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application2's AM requests 1024 MB memory",1024,app2.getAMResource().getMemory()); assertEquals("Application2's AM should be running",1,app2.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); ApplicationAttemptId attId3=createAppAttemptId(3,1); createApplicationWithAMResource(attId3,"queue1","user1",amResource1); createSchedulingRequestExistingApplication(1024,1,amPriority,attId3); FSAppAttempt app3=scheduler.getSchedulerApp(attId3); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application3's AM requests 1024 MB memory",1024,app3.getAMResource().getMemory()); assertEquals("Application3's AM should not be running",0,app3.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); createSchedulingRequestExistingApplication(1024,1,attId1); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application1 should have two running containers",2,app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); AppAttemptRemovedSchedulerEvent appRemovedEvent1=new AppAttemptRemovedSchedulerEvent(attId1,RMAppAttemptState.FINISHED,false); scheduler.update(); scheduler.handle(appRemovedEvent1); scheduler.handle(updateEvent); assertEquals("Application1's AM should be finished",0,app1.getLiveContainers().size()); assertEquals("Application3's AM should be running",1,app3.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); ApplicationAttemptId attId4=createAppAttemptId(4,1); createApplicationWithAMResource(attId4,"queue1","user1",amResource2); 
createSchedulingRequestExistingApplication(2048,2,amPriority,attId4); FSAppAttempt app4=scheduler.getSchedulerApp(attId4); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application4's AM requests 2048 MB memory",2048,app4.getAMResource().getMemory()); assertEquals("Application4's AM should not be running",0,app4.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); ApplicationAttemptId attId5=createAppAttemptId(5,1); createApplicationWithAMResource(attId5,"queue1","user1",amResource2); createSchedulingRequestExistingApplication(2048,2,amPriority,attId5); FSAppAttempt app5=scheduler.getSchedulerApp(attId5); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application5's AM requests 2048 MB memory",2048,app5.getAMResource().getMemory()); assertEquals("Application5's AM should not be running",0,app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); AppAttemptRemovedSchedulerEvent appRemovedEvent4=new AppAttemptRemovedSchedulerEvent(attId4,RMAppAttemptState.KILLED,false); scheduler.handle(appRemovedEvent4); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application5's AM should not be running",0,app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); AppAttemptRemovedSchedulerEvent appRemovedEvent2=new AppAttemptRemovedSchedulerEvent(attId2,RMAppAttemptState.FINISHED,false); AppAttemptRemovedSchedulerEvent appRemovedEvent3=new AppAttemptRemovedSchedulerEvent(attId3,RMAppAttemptState.FINISHED,false); scheduler.handle(appRemovedEvent2); scheduler.handle(appRemovedEvent3); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application2's AM should be finished",0,app2.getLiveContainers().size()); assertEquals("Application3's AM should be 
finished",0,app3.getLiveContainers().size()); assertEquals("Application5's AM should be running",1,app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); ApplicationAttemptId attId6=createAppAttemptId(6,1); createApplicationWithAMResource(attId6,"queue1","user1",amResource3); createSchedulingRequestExistingApplication(1860,2,amPriority,attId6); FSAppAttempt app6=scheduler.getSchedulerApp(attId6); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application6's AM should not be running",0,app6.getLiveContainers().size()); assertEquals("Application6's AM requests 2048 MB memory",2048,app6.getAMResource().getMemory()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); AppAttemptRemovedSchedulerEvent appRemovedEvent5=new AppAttemptRemovedSchedulerEvent(attId5,RMAppAttemptState.FINISHED,false); AppAttemptRemovedSchedulerEvent appRemovedEvent6=new AppAttemptRemovedSchedulerEvent(attId6,RMAppAttemptState.FINISHED,false); scheduler.handle(appRemovedEvent5); scheduler.handle(appRemovedEvent6); scheduler.update(); assertEquals("Queue1's AM resource usage should be 0",0,queue1.getAmResourceUsage().getMemory()); }

    APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
    /** * Test to verify the behavior of{@link FSQueue#assignContainer(FSSchedulerNode)}) * Create two queues under root (fifoQueue and fairParent), and two queues * under fairParent (fairChild1 and fairChild2). Submit two apps to the * fifoQueue and one each to the fairChild* queues, all apps requiring 4 * containers each of the total 16 container capacity * Assert the number of containers for each app after 4, 8, 12 and 16 updates. * @throws Exception */ @Test(timeout=5000) public void testAssignContainer() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); final String user="user1"; final String fifoQueue="fifo"; final String fairParent="fairParent"; final String fairChild1=fairParent + ".fairChild1"; final String fairChild2=fairParent + ".fairChild2"; RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),1,"127.0.0.1"); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent1); scheduler.handle(nodeEvent2); ApplicationAttemptId attId1=createSchedulingRequest(1024,fifoQueue,user,4); ApplicationAttemptId attId2=createSchedulingRequest(1024,fairChild1,user,4); ApplicationAttemptId attId3=createSchedulingRequest(1024,fairChild2,user,4); ApplicationAttemptId attId4=createSchedulingRequest(1024,fifoQueue,user,4); FSAppAttempt app1=scheduler.getSchedulerApp(attId1); FSAppAttempt app2=scheduler.getSchedulerApp(attId2); FSAppAttempt app3=scheduler.getSchedulerApp(attId3); FSAppAttempt app4=scheduler.getSchedulerApp(attId4); scheduler.getQueueManager().getLeafQueue(fifoQueue,true).setPolicy(SchedulingPolicy.parse("fifo")); scheduler.update(); NodeUpdateSchedulerEvent updateEvent1=new NodeUpdateSchedulerEvent(node1); NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2); 
    // Each iteration heartbeats both nodes (2 containers per iteration);
    // checks run every 2nd iteration. FIFO queue: app1 fills to 4 before app4
    // starts; fair children each get half the fifo queue's rate.
for (int i=0; i < 8; i++) { scheduler.handle(updateEvent1); scheduler.handle(updateEvent2); if ((i + 1) % 2 == 0) { String ERR="Wrong number of assigned containers after " + (i + 1) + " updates"; if (i < 4) { assertEquals(ERR,(i + 1),app1.getLiveContainers().size()); assertEquals(ERR,0,app4.getLiveContainers().size()); } else { assertEquals(ERR,4,app1.getLiveContainers().size()); assertEquals(ERR,(i - 3),app4.getLiveContainers().size()); } assertEquals(ERR,(i + 1) / 2,app2.getLiveContainers().size()); assertEquals(ERR,(i + 1) / 2,app3.getLiveContainers().size()); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testLowestCommonAncestorDeeperHierarchy() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); FSQueue aQueue=mock(FSLeafQueue.class); FSQueue bQueue=mock(FSLeafQueue.class); FSQueue a1Queue=mock(FSLeafQueue.class); FSQueue b1Queue=mock(FSLeafQueue.class); when(a1Queue.getName()).thenReturn("root.queue1.a.a1"); when(b1Queue.getName()).thenReturn("root.queue1.b.b1"); when(aQueue.getChildQueues()).thenReturn(Arrays.asList(a1Queue)); when(bQueue.getChildQueues()).thenReturn(Arrays.asList(b1Queue)); QueueManager queueManager=scheduler.getQueueManager(); FSParentQueue queue1=queueManager.getParentQueue("queue1",true); queue1.addChildQueue(aQueue); queue1.addChildQueue(bQueue); FSQueue ancestorQueue=scheduler.findLowestCommonAncestorQueue(a1Queue,b1Queue); assertEquals(ancestorQueue,queue1); }

    UtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testContinuousSchedulingWithNodeRemoved() throws Exception { scheduler.init(conf); scheduler.start(); Assert.assertTrue("Continuous scheduling should be disabled.",!scheduler.isContinuousSchedulingEnabled()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); Assert.assertEquals("We should have two alive nodes.",2,scheduler.getNumClusterNodes()); NodeRemovedSchedulerEvent removeNode1=new NodeRemovedSchedulerEvent(node1); scheduler.handle(removeNode1); Assert.assertEquals("We should only have one alive node.",1,scheduler.getNumClusterNodes()); try { scheduler.continuousSchedulingAttempt(); } catch ( Exception e) { fail("Exception happened when doing continuous scheduling. " + e.toString()); } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testStrictLocality() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",0); ResourceRequest nodeRequest=createResourceRequest(1024,node1.getHostName(),1,1,true); ResourceRequest rackRequest=createResourceRequest(1024,node1.getRackName(),1,1,false); ResourceRequest anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,1,false); createSchedulingRequestExistingApplication(nodeRequest,attId1); createSchedulingRequestExistingApplication(rackRequest,attId1); createSchedulingRequestExistingApplication(anyRequest,attId1); scheduler.update(); NodeUpdateSchedulerEvent node1UpdateEvent=new NodeUpdateSchedulerEvent(node1); NodeUpdateSchedulerEvent node2UpdateEvent=new NodeUpdateSchedulerEvent(node2); FSAppAttempt app=scheduler.getSchedulerApp(attId1); for (int i=0; i < 10; i++) { scheduler.handle(node2UpdateEvent); assertEquals(0,app.getLiveContainers().size()); assertEquals(0,app.getReservedContainers().size()); } scheduler.handle(node1UpdateEvent); assertEquals(1,app.getLiveContainers().size()); }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testFairShareWithMinAlloc() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(3 * 1024),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(2 * 1024,"queueA","user1"); createSchedulingRequest(2 * 1024,"queueB","user1"); scheduler.update(); Collection queues=scheduler.getQueueManager().getLeafQueues(); assertEquals(3,queues.size()); for ( FSLeafQueue p : queues) { if (p.getName().equals("root.queueA")) { assertEquals(1024,p.getFairShare().getMemory()); } else if (p.getName().equals("root.queueB")) { assertEquals(2048,p.getFairShare().getMemory()); } } }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testFairShareAndWeightsInNestedUserQueueRule() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println(""); out.println(" "); out.println(""); out.println(""); out.println(""); out.println(""); out.close(); RMApp rmApp1=new MockRMApp(0,0,RMAppState.NEW); RMApp rmApp2=new MockRMApp(1,1,RMAppState.NEW); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); int capacity=16 * 1024; RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(capacity),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(10 * 1024,"root.parentq","user1"); createSchedulingRequest(10 * 1024,"root.parentq","user2"); createSchedulingRequest(10 * 1024,"root.default","user3"); scheduler.update(); scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource()); scheduler.getQueueManager().getRootQueue().recomputeSteadyShares(); Collection leafQueues=scheduler.getQueueManager().getLeafQueues(); for ( FSLeafQueue leaf : leafQueues) { if (leaf.getName().equals("root.parentq.user1") || leaf.getName().equals("root.parentq.user2")) { assertEquals(capacity / 4,leaf.getFairShare().getMemory()); assertEquals(capacity / 4,leaf.getSteadyFairShare().getMemory()); assertEquals(1.0,leaf.getWeights().getWeight(ResourceType.MEMORY),0); } } }

    EqualityVerifier 
    /**
     * Verifies maxRunningApps enforcement across a queue hierarchy: the parent
     * cap (3) and a sub-queue cap (1) gate which submitted apps are runnable,
     * and finishing/killing apps promotes queued apps in submission order.
     * NOTE(review): the out.println("") calls appear to have lost their XML
     * allocation-file payload in this copy — confirm against original source.
     */
    @Test public void testMaxRunningAppsHierarchicalQueues() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); MockClock clock=new MockClock(); scheduler.setClock(clock); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println(" 3"); out.println(" "); out.println(" "); out.println(" "); out.println(" 1"); out.println(" "); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1.sub1","user1"); verifyAppRunnable(attId1,true); verifyQueueNumRunnable("queue1.sub1",1,0); clock.tick(10); ApplicationAttemptId attId2=createSchedulingRequest(1024,"queue1.sub3","user1"); verifyAppRunnable(attId2,true); verifyQueueNumRunnable("queue1.sub3",1,0); clock.tick(10); ApplicationAttemptId attId3=createSchedulingRequest(1024,"queue1.sub2","user1"); verifyAppRunnable(attId3,true); verifyQueueNumRunnable("queue1.sub2",1,0); clock.tick(10); ApplicationAttemptId attId4=createSchedulingRequest(1024,"queue1.sub2","user1"); verifyAppRunnable(attId4,false); verifyQueueNumRunnable("queue1.sub2",1,1); clock.tick(10); ApplicationAttemptId attId5=createSchedulingRequest(1024,"queue1.sub3","user1"); verifyAppRunnable(attId5,false); verifyQueueNumRunnable("queue1.sub3",1,1); clock.tick(10); AppAttemptRemovedSchedulerEvent appRemovedEvent1=new AppAttemptRemovedSchedulerEvent(attId2,RMAppAttemptState.FINISHED,false); scheduler.handle(appRemovedEvent1); verifyAppRunnable(attId4,true); verifyQueueNumRunnable("queue1.sub2",2,0); verifyAppRunnable(attId5,false); verifyQueueNumRunnable("queue1.sub3",0,1); AppAttemptRemovedSchedulerEvent appRemovedEvent2=new AppAttemptRemovedSchedulerEvent(attId5,RMAppAttemptState.KILLED,true); scheduler.handle(appRemovedEvent2); assertEquals(0,scheduler.maxRunningEnforcer.usersNonRunnableApps.get("user1").size()); 
verifyQueueNumRunnable("queue1.sub3",0,0); AppAttemptRemovedSchedulerEvent appRemovedEvent3=new AppAttemptRemovedSchedulerEvent(attId4,RMAppAttemptState.FINISHED,true); scheduler.handle(appRemovedEvent3); verifyQueueNumRunnable("queue1.sub2",1,0); verifyQueueNumRunnable("queue1.sub3",0,0); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testPreemptionIsNotDelayedToNextRound() throws Exception { conf.setLong(FairSchedulerConfiguration.PREEMPTION_INTERVAL,5000); conf.setLong(FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,10000); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"false"); MockClock clock=new MockClock(); scheduler.setClock(clock); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("8"); out.println(""); out.println(""); out.println(""); out.println(""); out.println("2"); out.println(""); out.print("10"); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8 * 1024,8),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); ApplicationAttemptId app1=createSchedulingRequest(1 * 1024,1,"queueA.queueA1","user1",7,1); ApplicationAttemptId app2=createSchedulingRequest(1 * 1024,1,"queueB","user2",1,1); scheduler.update(); NodeUpdateSchedulerEvent nodeUpdate1=new NodeUpdateSchedulerEvent(node1); for (int i=0; i < 8; i++) { scheduler.handle(nodeUpdate1); } assertEquals(7,scheduler.getSchedulerApp(app1).getLiveContainers().size()); assertEquals(1,scheduler.getSchedulerApp(app2).getLiveContainers().size()); ApplicationAttemptId app3=createSchedulingRequest(1 * 1024,1,"queueA.queueA2","user3",7,1); scheduler.update(); clock.tick(11); scheduler.update(); Resource toPreempt=scheduler.resToPreempt(scheduler.getQueueManager().getLeafQueue("queueA.queueA2",false),clock.getTime()); assertEquals(3277,toPreempt.getMemory()); scheduler.preemptResources(toPreempt); assertEquals(3,scheduler.getSchedulerApp(app1).getPreemptionContainers().size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies Dominant Resource Fairness across a hierarchy: four apps with
     * different memory/vcore demands in queue1.subqueue1/subqueue2 and queue2
     * are allocated containers in DRF order across successive heartbeats.
     * The Thread.sleep(3) calls separate app start times so DRF tie-breaking
     * by start time is deterministic.
     */
    @Test public void testDRFHierarchicalQueues() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node=MockNodes.newNodeInfo(1,BuilderUtils.newResource(12288,12),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId appAttId1=createSchedulingRequest(3074,1,"queue1.subqueue1","user1",2); Thread.sleep(3); FSAppAttempt app1=scheduler.getSchedulerApp(appAttId1); ApplicationAttemptId appAttId2=createSchedulingRequest(1024,3,"queue1.subqueue1","user1",2); Thread.sleep(3); FSAppAttempt app2=scheduler.getSchedulerApp(appAttId2); ApplicationAttemptId appAttId3=createSchedulingRequest(2048,2,"queue1.subqueue2","user1",2); Thread.sleep(3); FSAppAttempt app3=scheduler.getSchedulerApp(appAttId3); ApplicationAttemptId appAttId4=createSchedulingRequest(1024,2,"queue2","user1",2); Thread.sleep(3); FSAppAttempt app4=scheduler.getSchedulerApp(appAttId4); DominantResourceFairnessPolicy drfPolicy=new DominantResourceFairnessPolicy(); drfPolicy.initialize(scheduler.getClusterResource()); scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy); scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy); scheduler.getQueueManager().getQueue("queue1.subqueue1").setPolicy(drfPolicy); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(updateEvent); Assert.assertEquals(1,app1.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(1,app4.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(2,app4.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(1,app3.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(2,app3.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(1,app2.getLiveContainers().size()); 
scheduler.handle(updateEvent); Assert.assertEquals(1,app1.getLiveContainers().size()); Assert.assertEquals(1,app2.getLiveContainers().size()); Assert.assertEquals(2,app3.getLiveContainers().size()); Assert.assertEquals(2,app4.getLiveContainers().size()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Enables continuous scheduling on a fresh FairScheduler and verifies that
     * containers are allocated without node heartbeats: first one container,
     * then a second on a different node.
     *
     * @throws Exception on scheduler initialization or interrupted sleep
     */
    @Test(timeout = 10000)
    public void testContinuousScheduling() throws Exception {
      FairScheduler fs = new FairScheduler();
      Configuration conf = createConfiguration();
      conf.setBoolean(
          FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED, true);
      fs.setRMContext(resourceManager.getRMContext());
      fs.init(conf);
      fs.start();
      fs.reinitialize(conf, resourceManager.getRMContext());
      Assert.assertTrue("Continuous scheduling should be enabled.",
          fs.isContinuousSchedulingEnabled());

      RMNode node1 = MockNodes.newNodeInfo(
          1, Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
      NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
      fs.handle(nodeEvent1);
      RMNode node2 = MockNodes.newNodeInfo(
          1, Resources.createResource(8 * 1024, 8), 2, "127.0.0.2");
      NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
      fs.handle(nodeEvent2);
      // Fix: JUnit assertEquals takes (expected, actual) — was reversed.
      Assert.assertEquals(16 * 1024, fs.getClusterResource().getMemory());
      Assert.assertEquals(16, fs.getClusterResource().getVirtualCores());

      ApplicationAttemptId appAttemptId =
          createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
      fs.addApplication(appAttemptId.getApplicationId(), "queue11", "user11",
          false);
      fs.addApplicationAttempt(appAttemptId, false, false);
      // Fix: restore element types lost in this copy of the file; the raw
      // Iterator below could not compile without them.
      List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
      ResourceRequest request =
          createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
      ask.add(request);
      fs.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null);

      Thread.sleep(fs.getConf().getContinuousSchedulingSleepMs() + 500);

      FSAppAttempt app = fs.getSchedulerApp(appAttemptId);
      // Fix: poll with a short sleep instead of a CPU-burning spin loop; the
      // @Test timeout still bounds the wait.
      while (app.getCurrentConsumption().equals(Resources.none())) {
        Thread.sleep(10);
      }
      Assert.assertEquals(1024, app.getCurrentConsumption().getMemory());
      Assert.assertEquals(1, app.getCurrentConsumption().getVirtualCores());

      // Ask for a second container and wait for it to be allocated too.
      request = createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
      ask.clear();
      ask.add(request);
      fs.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null);
      while (app.getCurrentConsumption()
          .equals(Resources.createResource(1024, 1))) {
        Thread.sleep(10);
      }
      Assert.assertEquals(2048, app.getCurrentConsumption().getMemory());
      Assert.assertEquals(2, app.getCurrentConsumption().getVirtualCores());

      // The two containers should have landed on two distinct nodes.
      Set<NodeId> nodes = new HashSet<NodeId>();
      Iterator<RMContainer> it = app.getLiveContainers().iterator();
      while (it.hasNext()) {
        nodes.add(it.next().getContainer().getNodeId());
      }
      Assert.assertEquals(2, nodes.size());
    }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=5000) public void testIsStarvedForFairShare() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println(".25"); out.println(""); out.println(""); out.println(".75"); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(4 * 1024,4),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(3 * 1024,"queueA","user1"); scheduler.update(); NodeUpdateSchedulerEvent nodeEvent2=new NodeUpdateSchedulerEvent(node1); scheduler.handle(nodeEvent2); createSchedulingRequest(1 * 1024,"queueB","user1"); scheduler.update(); Collection queues=scheduler.getQueueManager().getLeafQueues(); assertEquals(3,queues.size()); for ( FSLeafQueue p : queues) { if (p.getName().equals("root.queueA")) { assertEquals(false,scheduler.isStarvedForFairShare(p)); } else if (p.getName().equals("root.queueB")) { assertEquals(true,scheduler.isStarvedForFairShare(p)); } } scheduler.handle(nodeEvent2); for ( FSLeafQueue p : queues) { if (p.getName().equals("root.queueB")) { assertEquals(false,scheduler.isStarvedForFairShare(p)); } } }

    EqualityVerifier 
    @Test public void testNotUserAsDefaultQueue() throws Exception { conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"false"); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId appAttemptId=createAppAttemptId(1,1); createApplicationWithAMResource(appAttemptId,"default","user2",null); assertEquals(0,scheduler.getQueueManager().getLeafQueue("user1",true).getRunnableAppSchedulables().size()); assertEquals(1,scheduler.getQueueManager().getLeafQueue("default",true).getRunnableAppSchedulables().size()); assertEquals(0,scheduler.getQueueManager().getLeafQueue("user2",true).getRunnableAppSchedulables().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Verifies steady fair shares track cluster capacity and allocation-file
     * reloads: shares are 0 with no nodes, split across children when a 6 GB
     * node joins, re-split by new weights after a reload that adds child3, and
     * drop back to 0 when the node is removed.
     * NOTE(review): the out.println("") calls appear to have lost their XML
     * allocation-file payload in this copy — confirm against original source.
     */
    @Test public void testSteadyFairShareWithReloadAndNodeAddRemove() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println("fair"); out.println(""); out.println(" drf"); out.println(" "); out.println(" 1"); out.println(" "); out.println(" "); out.println(" 1"); out.println(" "); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); assertEquals(0,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory()); assertEquals(0,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(6144),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); assertEquals(6144,scheduler.getClusterResource().getMemory()); assertEquals(2048,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory()); assertEquals(2048,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory()); out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println("fair"); out.println(""); out.println(" drf"); out.println(" "); out.println(" 1"); out.println(" "); out.println(" "); out.println(" 2"); out.println(" "); out.println(" "); out.println(" 2"); out.println(" "); out.println(""); out.println(""); out.close(); scheduler.reinitialize(conf,resourceManager.getRMContext()); assertEquals(1024,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory()); assertEquals(2048,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory()); assertEquals(2048,queueManager.getLeafQueue("child3",false).getSteadyFairShare().getMemory()); NodeRemovedSchedulerEvent nodeEvent2=new 
NodeRemovedSchedulerEvent(node1); scheduler.handle(nodeEvent2); assertEquals(0,scheduler.getClusterResource().getMemory()); assertEquals(0,queueManager.getLeafQueue("child1",false).getSteadyFairShare().getMemory()); assertEquals(0,queueManager.getLeafQueue("child2",false).getSteadyFairShare().getMemory()); }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetAppsInQueue() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId appAttId1=createSchedulingRequest(1024,1,"queue1.subqueue1","user1"); ApplicationAttemptId appAttId2=createSchedulingRequest(1024,1,"queue1.subqueue2","user1"); ApplicationAttemptId appAttId3=createSchedulingRequest(1024,1,"default","user1"); List apps=scheduler.getAppsInQueue("queue1.subqueue1"); assertEquals(1,apps.size()); assertEquals(appAttId1,apps.get(0)); apps=scheduler.getAppsInQueue("root.queue1.subqueue1"); assertEquals(1,apps.size()); assertEquals(appAttId1,apps.get(0)); apps=scheduler.getAppsInQueue("user1"); assertEquals(1,apps.size()); assertEquals(appAttId3,apps.get(0)); apps=scheduler.getAppsInQueue("root.user1"); assertEquals(1,apps.size()); assertEquals(appAttId3,apps.get(0)); apps=scheduler.getAppsInQueue("queue1"); Assert.assertEquals(2,apps.size()); Set appAttIds=Sets.newHashSet(apps.get(0),apps.get(1)); assertTrue(appAttIds.contains(appAttId1)); assertTrue(appAttIds.contains(appAttId2)); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testNoMoreCpuOnNode() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(2048,1),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); ApplicationAttemptId attId=createSchedulingRequest(1024,1,"default","user1",2); FSAppAttempt app=scheduler.getSchedulerApp(attId); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); assertEquals(1,app.getLiveContainers().size()); scheduler.handle(updateEvent); assertEquals(1,app.getLiveContainers().size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testLowestCommonAncestorRootParent() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); FSLeafQueue aQueue=mock(FSLeafQueue.class); FSLeafQueue bQueue=mock(FSLeafQueue.class); when(aQueue.getName()).thenReturn("root.a"); when(bQueue.getName()).thenReturn("root.b"); QueueManager queueManager=scheduler.getQueueManager(); FSParentQueue queue1=queueManager.getParentQueue("root",false); queue1.addChildQueue(aQueue); queue1.addChildQueue(bQueue); FSQueue ancestorQueue=scheduler.findLowestCommonAncestorQueue(aQueue,bQueue); assertEquals(ancestorQueue,queue1); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=5000) public void testSimpleContainerAllocation() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024,4),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(512,2),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); createSchedulingRequest(512,2,"queue1","user1",2); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); assertEquals(FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemory()); NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2); scheduler.handle(updateEvent2); assertEquals(1024,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemory()); assertEquals(2,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getVirtualCores()); QueueMetrics queue1Metrics=scheduler.getQueueManager().getQueue("queue1").getMetrics(); assertEquals(1024,queue1Metrics.getAllocatedMB()); assertEquals(2,queue1Metrics.getAllocatedVirtualCores()); assertEquals(1024,scheduler.getRootQueueMetrics().getAllocatedMB()); assertEquals(2,scheduler.getRootQueueMetrics().getAllocatedVirtualCores()); assertEquals(512,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies queue ACL enforcement: a user not listed in queue1's submit ACL
     * ("usernotallow" vs the allowed "userallow") has their application
     * rejected — the app reaches SUBMITTED, then finishes with final status
     * FAILED after the scheduler refuses the submission. Polls app state with
     * a bounded 100 ms retry loop (MAX_TRIES).
     * NOTE(review): the out.println("") calls appear to have lost their XML
     * allocation-file payload in this copy — confirm against original source.
     */
    @SuppressWarnings("unchecked") @Test public void testNotAllowSubmitApplication() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println(" "); out.println(" "); out.println(" "); out.println(" userallow"); out.println(" userallow"); out.println(" "); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); int appId=this.APP_ID++; String user="usernotallow"; String queue="queue1"; ApplicationId applicationId=MockApps.newAppID(appId); String name=MockApps.newAppName(); ApplicationMasterService masterService=new ApplicationMasterService(resourceManager.getRMContext(),scheduler); ApplicationSubmissionContext submissionContext=new ApplicationSubmissionContextPBImpl(); ContainerLaunchContext clc=BuilderUtils.newContainerLaunchContext(null,null,null,null,null,null); submissionContext.setApplicationId(applicationId); submissionContext.setAMContainerSpec(clc); RMApp application=new RMAppImpl(applicationId,resourceManager.getRMContext(),conf,name,user,queue,submissionContext,scheduler,masterService,System.currentTimeMillis(),"YARN",null); resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId,application); application.handle(new RMAppEvent(applicationId,RMAppEventType.START)); final int MAX_TRIES=20; int numTries=0; while (!application.getState().equals(RMAppState.SUBMITTED) && numTries < MAX_TRIES) { try { Thread.sleep(100); } catch ( InterruptedException ex) { ex.printStackTrace(); } numTries++; } assertEquals("The application doesn't reach SUBMITTED.",RMAppState.SUBMITTED,application.getState()); ApplicationAttemptId attId=ApplicationAttemptId.newInstance(applicationId,this.ATTEMPT_ID++); scheduler.addApplication(attId.getApplicationId(),queue,user,false); numTries=0; while (application.getFinishTime() == 0 && 
numTries < MAX_TRIES) { try { Thread.sleep(100); } catch ( InterruptedException ex) { ex.printStackTrace(); } numTries++; } assertEquals(FinalApplicationStatus.FAILED,application.getFinalApplicationStatus()); }

    EqualityVerifier 
    @Test(timeout=5000) public void testSimpleContainerReservation() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(1024,"queue1","user1",1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); assertEquals(1024,scheduler.getQueueManager().getQueue("queue1").getResourceUsage().getMemory()); ApplicationAttemptId attId=createSchedulingRequest(1024,"queue2","user1",1); scheduler.update(); scheduler.handle(updateEvent); assertEquals(0,scheduler.getQueueManager().getQueue("queue2").getResourceUsage().getMemory()); assertEquals(1024,scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2); scheduler.handle(nodeEvent2); scheduler.handle(updateEvent2); assertEquals(1024,scheduler.getQueueManager().getQueue("queue2").getResourceUsage().getMemory()); assertEquals(1024,scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); scheduler.handle(updateEvent); assertEquals(0,scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testDontAllowUndeclaredPools() throws Exception { conf.setBoolean(FairSchedulerConfiguration.ALLOW_UNDECLARED_POOLS,false); conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); FSLeafQueue jerryQueue=queueManager.getLeafQueue("jerry",false); FSLeafQueue defaultQueue=queueManager.getLeafQueue("default",false); createSchedulingRequest(1024,"jerry","someuser"); assertEquals(1,jerryQueue.getRunnableAppSchedulables().size()); createSchedulingRequest(1024,"newqueue","someuser"); assertEquals(1,jerryQueue.getRunnableAppSchedulables().size()); assertEquals(1,defaultQueue.getRunnableAppSchedulables().size()); createSchedulingRequest(1024,"default","someuser"); assertEquals(1,jerryQueue.getRunnableAppSchedulables().size()); assertEquals(2,defaultQueue.getRunnableAppSchedulables().size()); createSchedulingRequest(1024,"default","jerry"); assertEquals(2,jerryQueue.getRunnableAppSchedulables().size()); assertEquals(2,defaultQueue.getRunnableAppSchedulables().size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * If we update our ask to strictly request a node, it doesn't make sense to keep * a reservation on another. */ @Test public void testReservationsStrictLocality() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1"); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent2); ApplicationAttemptId attId=createSchedulingRequest(1024,"queue1","user1",0); FSAppAttempt app=scheduler.getSchedulerApp(attId); ResourceRequest nodeRequest=createResourceRequest(1024,node2.getHostName(),1,2,true); ResourceRequest rackRequest=createResourceRequest(1024,"rack1",1,2,true); ResourceRequest anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,2,false); createSchedulingRequestExistingApplication(nodeRequest,attId); createSchedulingRequestExistingApplication(rackRequest,attId); createSchedulingRequestExistingApplication(anyRequest,attId); scheduler.update(); NodeUpdateSchedulerEvent nodeUpdateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(nodeUpdateEvent); assertEquals(1,app.getLiveContainers().size()); scheduler.handle(nodeUpdateEvent); assertEquals(1,app.getReservedContainers().size()); rackRequest=createResourceRequest(1024,"rack1",1,1,false); anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,1,false); scheduler.allocate(attId,Arrays.asList(rackRequest,anyRequest),new ArrayList(),null,null); scheduler.handle(nodeUpdateEvent); assertEquals(0,app.getReservedContainers().size()); }

    EqualityVerifier 
    @Test public void testSteadyFairShareWithQueueCreatedRuntime() throws Exception { conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,SimpleGroupsMapping.class,GroupMappingServiceProvider.class); conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"true"); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(6144),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); assertEquals(6144,scheduler.getClusterResource().getMemory()); assertEquals(6144,scheduler.getQueueManager().getRootQueue().getSteadyFairShare().getMemory()); assertEquals(6144,scheduler.getQueueManager().getLeafQueue("default",false).getSteadyFairShare().getMemory()); ApplicationAttemptId appAttemptId1=createAppAttemptId(1,1); createApplicationWithAMResource(appAttemptId1,"default","user1",null); assertEquals(3072,scheduler.getQueueManager().getLeafQueue("default",false).getSteadyFairShare().getMemory()); assertEquals(3072,scheduler.getQueueManager().getLeafQueue("user1",false).getSteadyFairShare().getMemory()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** * Two apps on one queue, one app on another */ @Test public void testBasicDRFWithQueues() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node=MockNodes.newNodeInfo(1,BuilderUtils.newResource(8192,7),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId appAttId1=createSchedulingRequest(3072,1,"queue1","user1",2); FSAppAttempt app1=scheduler.getSchedulerApp(appAttId1); ApplicationAttemptId appAttId2=createSchedulingRequest(2048,2,"queue1","user1",2); FSAppAttempt app2=scheduler.getSchedulerApp(appAttId2); ApplicationAttemptId appAttId3=createSchedulingRequest(1024,2,"queue2","user1",2); FSAppAttempt app3=scheduler.getSchedulerApp(appAttId3); DominantResourceFairnessPolicy drfPolicy=new DominantResourceFairnessPolicy(); drfPolicy.initialize(scheduler.getClusterResource()); scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy); scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(updateEvent); Assert.assertEquals(1,app1.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(1,app3.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(2,app3.getLiveContainers().size()); scheduler.handle(updateEvent); Assert.assertEquals(1,app2.getLiveContainers().size()); }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises FairScheduler#resToPreempt: after a queue has been below its
     * min share longer than the min-share timeout (5s) it may preempt up to its
     * min share (1024 MB); after the fair-share timeout (10s) it may preempt up
     * to its fair share (1536 MB).
     */
    @Test(timeout=5000)
    public void testPreemptionDecision() throws Exception {
      conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
      MockClock clock = new MockClock();
      scheduler.setClock(clock);
      // Four equal-weight queues with 1 GB min share each; default capped at 0.
      // NOTE(review): the XML markup was reconstructed - the extraction had
      // stripped the tags, keeping only the element text ("0mb,0vcores", ".25",
      // "5", "10"). Verify against upstream TestFairScheduler.
      PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
      out.println("<?xml version=\"1.0\"?>");
      out.println("<allocations>");
      out.println("<queue name=\"default\">");
      out.println("<maxResources>0mb,0vcores</maxResources>");
      out.println("</queue>");
      out.println("<queue name=\"queueA\">");
      out.println("<weight>.25</weight>");
      out.println("<minResources>1024mb,0vcores</minResources>");
      out.println("</queue>");
      out.println("<queue name=\"queueB\">");
      out.println("<weight>.25</weight>");
      out.println("<minResources>1024mb,0vcores</minResources>");
      out.println("</queue>");
      out.println("<queue name=\"queueC\">");
      out.println("<weight>.25</weight>");
      out.println("<minResources>1024mb,0vcores</minResources>");
      out.println("</queue>");
      out.println("<queue name=\"queueD\">");
      out.println("<weight>.25</weight>");
      out.println("<minResources>1024mb,0vcores</minResources>");
      out.println("</queue>");
      out.print("<defaultMinSharePreemptionTimeout>5</defaultMinSharePreemptionTimeout>");
      out.print("<fairSharePreemptionTimeout>10</fairSharePreemptionTimeout>");
      out.println("</allocations>");
      out.close();
      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());
      // Three 2 GB / 2-core nodes.
      RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2), 1, "127.0.0.1");
      NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
      scheduler.handle(nodeEvent1);
      RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2), 2, "127.0.0.2");
      NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
      scheduler.handle(nodeEvent2);
      RMNode node3 = MockNodes.newNodeInfo(1, Resources.createResource(2 * 1024, 2), 3, "127.0.0.3");
      NodeAddedSchedulerEvent nodeEvent3 = new NodeAddedSchedulerEvent(node3);
      scheduler.handle(nodeEvent3);
      // Queues A and B fill the whole cluster.
      ApplicationAttemptId app1 = createSchedulingRequest(1 * 1024, "queueA", "user1", 1, 1);
      ApplicationAttemptId app2 = createSchedulingRequest(1 * 1024, "queueA", "user1", 1, 2);
      ApplicationAttemptId app3 = createSchedulingRequest(1 * 1024, "queueA", "user1", 1, 3);
      ApplicationAttemptId app4 = createSchedulingRequest(1 * 1024, "queueB", "user1", 1, 1);
      ApplicationAttemptId app5 = createSchedulingRequest(1 * 1024, "queueB", "user1", 1, 2);
      ApplicationAttemptId app6 = createSchedulingRequest(1 * 1024, "queueB", "user1", 1, 3);
      scheduler.update();
      // Two heartbeats per node so all six containers get placed.
      for (int i = 0; i < 2; i++) {
        NodeUpdateSchedulerEvent nodeUpdate1 = new NodeUpdateSchedulerEvent(node1);
        scheduler.handle(nodeUpdate1);
        NodeUpdateSchedulerEvent nodeUpdate2 = new NodeUpdateSchedulerEvent(node2);
        scheduler.handle(nodeUpdate2);
        NodeUpdateSchedulerEvent nodeUpdate3 = new NodeUpdateSchedulerEvent(node3);
        scheduler.handle(nodeUpdate3);
      }
      // Queues C and D now ask, but the cluster is full - they starve.
      ApplicationAttemptId app7 = createSchedulingRequest(1 * 1024, "queueC", "user1", 1, 1);
      ApplicationAttemptId app8 = createSchedulingRequest(1 * 1024, "queueC", "user1", 1, 2);
      ApplicationAttemptId app9 = createSchedulingRequest(1 * 1024, "queueC", "user1", 1, 3);
      ApplicationAttemptId app10 = createSchedulingRequest(1 * 1024, "queueD", "user1", 1, 1);
      ApplicationAttemptId app11 = createSchedulingRequest(1 * 1024, "queueD", "user1", 1, 2);
      ApplicationAttemptId app12 = createSchedulingRequest(1 * 1024, "queueD", "user1", 1, 3);
      scheduler.update();
      FSLeafQueue schedC = scheduler.getQueueManager().getLeafQueue("queueC", true);
      FSLeafQueue schedD = scheduler.getQueueManager().getLeafQueue("queueD", true);
      // No timeout has expired yet -> nothing to preempt.
      assertTrue(Resources.equals(Resources.none(), scheduler.resToPreempt(schedC, clock.getTime())));
      assertTrue(Resources.equals(Resources.none(), scheduler.resToPreempt(schedD, clock.getTime())));
      // Past the 5s min-share timeout: preempt up to min share.
      clock.tick(6);
      assertEquals(1024, scheduler.resToPreempt(schedC, clock.getTime()).getMemory());
      assertEquals(1024, scheduler.resToPreempt(schedD, clock.getTime()).getMemory());
      // Past the 10s fair-share timeout: preempt up to fair share.
      scheduler.update();
      clock.tick(6);
      assertEquals(1536, scheduler.resToPreempt(schedC, clock.getTime()).getMemory());
      assertEquals(1536, scheduler.resToPreempt(schedD, clock.getTime()).getMemory());
    }

    EqualityVerifier 
    @Test public void testUserAsDefaultQueue() throws Exception { conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"true"); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId appAttemptId=createAppAttemptId(1,1); createApplicationWithAMResource(appAttemptId,"default","user1",null); assertEquals(1,scheduler.getQueueManager().getLeafQueue("user1",true).getRunnableAppSchedulables().size()); assertEquals(0,scheduler.getQueueManager().getLeafQueue("default",true).getRunnableAppSchedulables().size()); assertEquals("root.user1",resourceManager.getRMContext().getRMApps().get(appAttemptId.getApplicationId()).getQueue()); }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=5000) public void testIsStarvedForMinShare() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(4 * 1024,4),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(3 * 1024,"queueA","user1"); scheduler.update(); NodeUpdateSchedulerEvent nodeEvent2=new NodeUpdateSchedulerEvent(node1); scheduler.handle(nodeEvent2); createSchedulingRequest(1 * 1024,"queueB","user1"); scheduler.update(); Collection queues=scheduler.getQueueManager().getLeafQueues(); assertEquals(3,queues.size()); for ( FSLeafQueue p : queues) { if (p.getName().equals("root.queueA")) { assertEquals(false,scheduler.isStarvedForMinShare(p)); } else if (p.getName().equals("root.queueB")) { assertEquals(true,scheduler.isStarvedForMinShare(p)); } } scheduler.handle(nodeEvent2); for ( FSLeafQueue p : queues) { if (p.getName().equals("root.queueB")) { assertEquals(false,scheduler.isStarvedForMinShare(p)); } } }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=3000) public void testMaxAssign() throws Exception { conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,true); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16384,16),0,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId attId=createSchedulingRequest(1024,"root.default","user",8); FSAppAttempt app=scheduler.getSchedulerApp(attId); scheduler.maxAssign=2; scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",2,app.getLiveContainers().size()); scheduler.maxAssign=-1; scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",8,app.getLiveContainers().size()); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testSimpleFairShareCalculation() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(10 * 1024),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(10 * 1024,"queue1","user1"); createSchedulingRequest(10 * 1024,"queue2","user1"); createSchedulingRequest(10 * 1024,"root.default","user1"); scheduler.update(); scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource()); scheduler.getQueueManager().getRootQueue().recomputeSteadyShares(); Collection queues=scheduler.getQueueManager().getLeafQueues(); assertEquals(3,queues.size()); for ( FSLeafQueue p : queues) { assertEquals(3414,p.getFairShare().getMemory()); assertEquals(3414,p.getMetrics().getFairShareMB()); assertEquals(3414,p.getSteadyFairShare().getMemory()); assertEquals(3414,p.getMetrics().getSteadyFairShareMB()); } }

    APIUtilityVerifier IterativeVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=30000) public void testHostPortNodeName() throws Exception { conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,true); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1",1); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.1",2); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",0); ResourceRequest nodeRequest=createResourceRequest(1024,node1.getNodeID().getHost() + ":" + node1.getNodeID().getPort(),1,1,true); ResourceRequest rackRequest=createResourceRequest(1024,node1.getRackName(),1,1,false); ResourceRequest anyRequest=createResourceRequest(1024,ResourceRequest.ANY,1,1,false); createSchedulingRequestExistingApplication(nodeRequest,attId1); createSchedulingRequestExistingApplication(rackRequest,attId1); createSchedulingRequestExistingApplication(anyRequest,attId1); scheduler.update(); NodeUpdateSchedulerEvent node1UpdateEvent=new NodeUpdateSchedulerEvent(node1); NodeUpdateSchedulerEvent node2UpdateEvent=new NodeUpdateSchedulerEvent(node2); FSAppAttempt app=scheduler.getSchedulerApp(attId1); for (int i=0; i < 10; i++) { scheduler.handle(node2UpdateEvent); assertEquals(0,app.getLiveContainers().size()); assertEquals(0,app.getReservedContainers().size()); } scheduler.handle(node1UpdateEvent); assertEquals(1,app.getLiveContainers().size()); }

    EqualityVerifier 
    @Test public void testAppAdditionAndRemoval() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId attemptId=createAppAttemptId(1,1); AppAddedSchedulerEvent appAddedEvent=new AppAddedSchedulerEvent(attemptId.getApplicationId(),"default","user1"); scheduler.handle(appAddedEvent); AppAttemptAddedSchedulerEvent attemptAddedEvent=new AppAttemptAddedSchedulerEvent(createAppAttemptId(1,1),false); scheduler.handle(attemptAddedEvent); assertEquals(2,scheduler.getQueueManager().getLeafQueues().size()); assertEquals(1,scheduler.getQueueManager().getLeafQueue("user1",true).getRunnableAppSchedulables().size()); AppAttemptRemovedSchedulerEvent appRemovedEvent1=new AppAttemptRemovedSchedulerEvent(createAppAttemptId(1,1),RMAppAttemptState.FINISHED,false); scheduler.handle(appRemovedEvent1); assertEquals(0,scheduler.getQueueManager().getLeafQueue("user1",true).getRunnableAppSchedulables().size()); }

    EqualityVerifier 
    /** * Make allocation requests and ensure they are reflected in queue demand. */ @Test public void testQueueDemandCalculation() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId id11=createAppAttemptId(1,1); scheduler.addApplication(id11.getApplicationId(),"root.queue1","user1",false); scheduler.addApplicationAttempt(id11,false,false); ApplicationAttemptId id21=createAppAttemptId(2,1); scheduler.addApplication(id21.getApplicationId(),"root.queue2","user1",false); scheduler.addApplicationAttempt(id21,false,false); ApplicationAttemptId id22=createAppAttemptId(2,2); scheduler.addApplication(id22.getApplicationId(),"root.queue2","user1",false); scheduler.addApplicationAttempt(id22,false,false); int minReqSize=FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB; List ask1=new ArrayList(); ResourceRequest request1=createResourceRequest(minReqSize * 2,ResourceRequest.ANY,1,1,true); ask1.add(request1); scheduler.allocate(id11,ask1,new ArrayList(),null,null); List ask2=new ArrayList(); ResourceRequest request2=createResourceRequest(2 * minReqSize,"foo",1,1,false); ResourceRequest request3=createResourceRequest(minReqSize,"bar",1,2,false); ask2.add(request2); ask2.add(request3); scheduler.allocate(id21,ask2,new ArrayList(),null,null); List ask3=new ArrayList(); ResourceRequest request4=createResourceRequest(2 * minReqSize,ResourceRequest.ANY,1,1,true); ask3.add(request4); scheduler.allocate(id22,ask3,new ArrayList(),null,null); scheduler.update(); assertEquals(2 * minReqSize,scheduler.getQueueManager().getQueue("root.queue1").getDemand().getMemory()); assertEquals(2 * minReqSize + 2 * minReqSize + (2 * minReqSize),scheduler.getQueueManager().getQueue("root.queue2").getDemand().getMemory()); }

    EqualityVerifier 
    @Test(timeout=5000) public void testUserMaxRunningApps() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("1"); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(8192,8),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); assertEquals(1,scheduler.getSchedulerApp(attId1).getLiveContainers().size()); ApplicationAttemptId attId2=createSchedulingRequest(1024,"queue1","user1",1); scheduler.update(); scheduler.handle(updateEvent); assertEquals(0,scheduler.getSchedulerApp(attId2).getLiveContainers().size()); createSchedulingRequestExistingApplication(1024,1,attId1); scheduler.update(); scheduler.handle(updateEvent); assertEquals(2,scheduler.getSchedulerApp(attId1).getLiveContainers().size()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @SuppressWarnings("resource") @Test public void testBlacklistNodes() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); final int GB=1024; String host="127.0.0.1"; RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16 * GB,16),0,host); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId appAttemptId=createSchedulingRequest(GB,"root.default","user",1); FSAppAttempt app=scheduler.getSchedulerApp(appAttemptId); scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),Collections.singletonList(host),null); assertTrue(app.isBlacklisted(host)); scheduler.allocate(appAttemptId,Collections.emptyList(),Collections.emptyList(),null,Collections.singletonList(host)); assertFalse(scheduler.getSchedulerApp(appAttemptId).isBlacklisted(host)); List update=Arrays.asList(createResourceRequest(GB,node.getHostName(),1,0,true)); scheduler.allocate(appAttemptId,update,Collections.emptyList(),Collections.singletonList(host),null); assertTrue(app.isBlacklisted(host)); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",0,app.getLiveContainers().size()); scheduler.allocate(appAttemptId,update,Collections.emptyList(),null,Collections.singletonList(host)); assertFalse(app.isBlacklisted(host)); createSchedulingRequest(GB,"root.default","user",1); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",1,app.getLiveContainers().size()); }

    EqualityVerifier 
    @Test public void testPerfMetricsInited(){ scheduler.init(conf); scheduler.start(); MetricsCollectorImpl collector=new MetricsCollectorImpl(); scheduler.fsOpDurations.getMetrics(collector,true); assertEquals("Incorrect number of perf metrics",1,collector.getRecords().size()); }

    InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
    @Test(expected=YarnException.class) public void testMoveWouldViolateMaxResourcesConstraints() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueMgr=scheduler.getQueueManager(); FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true); queueMgr.getLeafQueue("queue2",true); scheduler.getAllocationConfiguration().maxQueueResources.put("root.queue2",Resource.newInstance(1024,1)); ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(2048,2)); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.handle(updateEvent); scheduler.handle(updateEvent); assertEquals(Resource.newInstance(2048,2),oldQueue.getResourceUsage()); scheduler.moveApplication(appAttId.getApplicationId(),"queue2"); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testMoveNonRunnableApp() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueMgr=scheduler.getQueueManager(); FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true); FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true); scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue1",0); scheduler.getAllocationConfiguration().queueMaxApps.put("root.queue2",0); ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3); assertEquals(0,oldQueue.getNumRunnableApps()); scheduler.moveApplication(appAttId.getApplicationId(),"queue2"); assertEquals(0,oldQueue.getNumRunnableApps()); assertEquals(0,targetQueue.getNumRunnableApps()); assertEquals(0,queueMgr.getRootQueue().getNumRunnableApps()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test public void testLowestCommonAncestorForNonRootParent() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); FSLeafQueue aQueue=mock(FSLeafQueue.class); FSLeafQueue bQueue=mock(FSLeafQueue.class); when(aQueue.getName()).thenReturn("root.queue1.a"); when(bQueue.getName()).thenReturn("root.queue1.b"); QueueManager queueManager=scheduler.getQueueManager(); FSParentQueue queue1=queueManager.getParentQueue("queue1",true); queue1.addChildQueue(aQueue); queue1.addChildQueue(bQueue); FSQueue ancestorQueue=scheduler.findLowestCommonAncestorQueue(aQueue,bQueue); assertEquals(ancestorQueue,queue1); }

    InternalCallVerifier EqualityVerifier 
    @Test(timeout=5000) public void testMultipleNodesSingleRackRequest() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1"); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024),2,"127.0.0.2"); RMNode node3=MockNodes.newNodeInfo(2,Resources.createResource(1024),3,"127.0.0.3"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); ApplicationAttemptId appId=createAppAttemptId(this.APP_ID++,this.ATTEMPT_ID++); scheduler.addApplication(appId.getApplicationId(),"queue1","user1",false); scheduler.addApplicationAttempt(appId,false,false); List asks=new ArrayList(); asks.add(createResourceRequest(1024,node1.getHostName(),1,1,true)); asks.add(createResourceRequest(1024,node2.getHostName(),1,1,true)); asks.add(createResourceRequest(1024,node3.getHostName(),1,1,true)); asks.add(createResourceRequest(1024,node1.getRackName(),1,1,true)); asks.add(createResourceRequest(1024,node3.getRackName(),1,1,true)); asks.add(createResourceRequest(1024,ResourceRequest.ANY,1,2,true)); scheduler.allocate(appId,asks,new ArrayList(),null,null); scheduler.update(); NodeUpdateSchedulerEvent updateEvent1=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent1); assertEquals(1,scheduler.getSchedulerApp(appId).getLiveContainers().size()); scheduler.update(); NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2); scheduler.handle(updateEvent2); assertEquals(2,scheduler.getSchedulerApp(appId).getLiveContainers().size()); }

    EqualityVerifier 
    @Test(timeout=5000) public void testMultipleContainersWaitingForReservation() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(1024,"queue1","user1",1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue2","user2",1); ApplicationAttemptId attId2=createSchedulingRequest(1024,"queue3","user3",1); scheduler.update(); scheduler.handle(updateEvent); assertEquals(1024,scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemory()); assertEquals(0,scheduler.getSchedulerApp(attId2).getCurrentReservation().getMemory()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    @Test(timeout=3000) public void testMaxAssignWithZeroMemoryContainers() throws Exception { conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,true); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,0); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(16384,16),0,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); ApplicationAttemptId attId=createSchedulingRequest(0,1,"root.default","user",8); FSAppAttempt app=scheduler.getSchedulerApp(attId); scheduler.maxAssign=2; scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",2,app.getLiveContainers().size()); scheduler.maxAssign=-1; scheduler.update(); scheduler.handle(updateEvent); assertEquals("Incorrect number of containers allocated",8,app.getLiveContainers().size()); }

    EqualityVerifier 
    @Test public void testRemoveNodeUpdatesRootQueueMetrics() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024,4),1,"127.0.0.1"); NodeAddedSchedulerEvent addEvent=new NodeAddedSchedulerEvent(node1); scheduler.handle(addEvent); assertEquals(1024,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); scheduler.update(); assertEquals(1024,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); NodeRemovedSchedulerEvent removeEvent=new NodeRemovedSchedulerEvent(node1); scheduler.handle(removeEvent); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); scheduler.update(); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); }

    EqualityVerifier 
    @Test public void testQueuePlacementWithPolicy() throws Exception { conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,SimpleGroupsMapping.class,GroupMappingServiceProvider.class); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); ApplicationAttemptId appId; List rules=new ArrayList(); rules.add(new QueuePlacementRule.Specified().initialize(true,null)); rules.add(new QueuePlacementRule.User().initialize(false,null)); rules.add(new QueuePlacementRule.PrimaryGroup().initialize(false,null)); rules.add(new QueuePlacementRule.SecondaryGroupExistingQueue().initialize(false,null)); rules.add(new QueuePlacementRule.Default().initialize(true,null)); Set queues=Sets.newHashSet("root.user1","root.user3group","root.user4subgroup1","root.user4subgroup2","root.user5subgroup2"); Map> configuredQueues=new HashMap>(); configuredQueues.put(FSQueueType.LEAF,queues); configuredQueues.put(FSQueueType.PARENT,new HashSet()); scheduler.getAllocationConfiguration().placementPolicy=new QueuePlacementPolicy(rules,configuredQueues,conf); appId=createSchedulingRequest(1024,"somequeue","user1"); assertEquals("root.somequeue",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"default","user1"); assertEquals("root.user1",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"default","user3"); assertEquals("root.user3group",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"default","user4"); assertEquals("root.user4subgroup1",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"default","user5"); assertEquals("root.user5subgroup2",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"default","otheruser"); assertEquals("root.default",scheduler.getSchedulerApp(appId).getQueueName()); rules=new ArrayList(); rules.add(new 
QueuePlacementRule.User().initialize(false,null)); rules.add(new QueuePlacementRule.Specified().initialize(true,null)); rules.add(new QueuePlacementRule.Default().initialize(true,null)); scheduler.getAllocationConfiguration().placementPolicy=new QueuePlacementPolicy(rules,configuredQueues,conf); appId=createSchedulingRequest(1024,"somequeue","user1"); assertEquals("root.user1",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"somequeue","otheruser"); assertEquals("root.somequeue",scheduler.getSchedulerApp(appId).getQueueName()); appId=createSchedulingRequest(1024,"default","otheruser"); assertEquals("root.default",scheduler.getSchedulerApp(appId).getQueueName()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Writes an allocation file and verifies the QueueManager parses a
     * hierarchical queue layout into exactly four leaf queues: queueA,
     * queueB.queueC, queueB.queueD, and default.
     *
     * NOTE(review): the out.println("...") literals appear to have had their
     * XML content stripped during extraction (most print empty strings or bare
     * "2048mb,0vcores" fragments); the intended allocation-file XML tags are
     * missing. Left byte-identical here — restore from the original test
     * resource before relying on this test.
     */
    @Test public void testHierarchicalQueueAllocationFileParsing() throws IOException, SAXException, AllocationConfigurationException, ParserConfigurationException { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println("2048mb,0vcores"); out.println(""); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); Collection leafQueues=queueManager.getLeafQueues(); Assert.assertEquals(4,leafQueues.size()); Assert.assertNotNull(queueManager.getLeafQueue("queueA",false)); Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueC",false)); Assert.assertNotNull(queueManager.getLeafQueue("queueB.queueD",false)); Assert.assertNotNull(queueManager.getLeafQueue("default",false)); Assert.assertEquals(4,leafQueues.size()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A request larger than the node (2048 MB on a 1024 MB node) must yield
     * neither a live container nor a reservation; a follow-up request that
     * fits (1024 MB) is then satisfied with one live container and still no
     * reservation.
     */
    @Test public void testReservationThatDoesntFit() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); ApplicationAttemptId attId=createSchedulingRequest(2048,"queue1","user1",1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); FSAppAttempt app=scheduler.getSchedulerApp(attId); assertEquals(0,app.getLiveContainers().size()); assertEquals(0,app.getReservedContainers().size()); createSchedulingRequestExistingApplication(1024,2,attId); scheduler.update(); scheduler.handle(updateEvent); assertEquals(1,app.getLiveContainers().size()); assertEquals(0,app.getReservedContainers().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With non-zero minimum allocations configured (256 MB / 1 vcore) and
     * increments of 512 MB / 2 vcores, the FairScheduler must report exactly
     * those values from getMinimumResourceCapability() and
     * getIncrementResourceCapability().
     */
    @Test
    public void testNonMinZeroResourcesSettings() throws IOException {
      YarnConfiguration schedulerConf = new YarnConfiguration();
      schedulerConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 256);
      schedulerConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 1);
      schedulerConf.setInt(
          FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512);
      schedulerConf.setInt(
          FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2);

      FairScheduler fs = new FairScheduler();
      fs.init(schedulerConf);
      fs.reinitialize(schedulerConf, null);

      Assert.assertEquals(256, fs.getMinimumResourceCapability().getMemory());
      Assert.assertEquals(1, fs.getMinimumResourceCapability().getVirtualCores());
      Assert.assertEquals(512, fs.getIncrementResourceCapability().getMemory());
      Assert.assertEquals(2, fs.getIncrementResourceCapability().getVirtualCores());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * With USER_AS_DEFAULT_QUEUE enabled, assignToQueue() must route an app
     * submitted to "default" into the per-user queue (root.asterix), while an
     * explicitly named queue ("notdefault") is kept as-is; in both cases the
     * RMApp's queue field is updated to match the returned leaf queue's name.
     */
    @Test public void testAssignToQueue() throws Exception { conf.set(FairSchedulerConfiguration.USER_AS_DEFAULT_QUEUE,"true"); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMApp rmApp1=new MockRMApp(0,0,RMAppState.NEW); RMApp rmApp2=new MockRMApp(1,1,RMAppState.NEW); FSLeafQueue queue1=scheduler.assignToQueue(rmApp1,"default","asterix"); FSLeafQueue queue2=scheduler.assignToQueue(rmApp2,"notdefault","obelix"); assertEquals(rmApp1.getQueue(),queue1.getName()); assertEquals("root.asterix",rmApp1.getQueue()); assertEquals(rmApp2.getQueue(),queue2.getName()); assertEquals("root.notdefault",rmApp2.getQueue()); }

    BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * With continuous scheduling enabled, the scheduler's update and scheduling
     * threads must be alive after start() and must both terminate after
     * stop(). Termination is polled in 50 ms steps for up to 100 retries; if
     * the retry budget is exhausted (numRetries reaches 0) a thread is still
     * alive and the test fails.
     */
    @Test public void testThreadLifeCycle() throws InterruptedException { conf.setBoolean(FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,true); scheduler.init(conf); scheduler.start(); Thread updateThread=scheduler.updateThread; Thread schedulingThread=scheduler.schedulingThread; assertTrue(updateThread.isAlive()); assertTrue(schedulingThread.isAlive()); scheduler.stop(); int numRetries=100; while (numRetries-- > 0 && (updateThread.isAlive() || schedulingThread.isAlive())) { Thread.sleep(50); } assertNotEquals("One of the threads is still alive",0,numRetries); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Exercises reservation behavior when an app has outstanding requests at
     * two priorities on a fully used node: the second request reserves at
     * priority 2, and after the original container is released via allocate()
     * the reserved priority-2 request is the one that gets the freed capacity.
     * Root-queue available MB/vcores are checked at each step.
     *
     * NOTE(review): {@code new ArrayList()} and {@code Collection liveContainers}
     * are raw types — the for-each over liveContainers with an RMContainer
     * element suggests the generic parameters ({@code Collection<RMContainer>})
     * were stripped during extraction. Left byte-identical here.
     */
    @Test(timeout=5000) public void testReservationWhileMultiplePriorities() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024,4),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); ApplicationAttemptId attId=createSchedulingRequest(1024,4,"queue1","user1",1,2); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); FSAppAttempt app=scheduler.getSchedulerApp(attId); assertEquals(1,app.getLiveContainers().size()); ContainerId containerId=scheduler.getSchedulerApp(attId).getLiveContainers().iterator().next().getContainerId(); createSchedulingRequestExistingApplication(1024,4,2,attId); scheduler.update(); scheduler.handle(updateEvent); assertEquals(1,app.getLiveContainers().size()); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); createSchedulingRequestExistingApplication(1024,4,1,attId); scheduler.update(); scheduler.handle(updateEvent); assertEquals(1,app.getLiveContainers().size()); for ( RMContainer container : app.getReservedContainers()) { assertEquals(2,container.getReservedPriority().getPriority()); } scheduler.allocate(attId,new ArrayList(),Arrays.asList(containerId),null,null); assertEquals(1024,scheduler.getRootQueueMetrics().getAvailableMB()); assertEquals(4,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); scheduler.update(); scheduler.handle(updateEvent); Collection liveContainers=app.getLiveContainers(); assertEquals(1,liveContainers.size()); for ( RMContainer liveContainer : liveContainers) { Assert.assertEquals(2,liveContainer.getContainer().getPriority().getPriority()); } assertEquals(0,scheduler.getRootQueueMetrics().getAvailableMB()); 
assertEquals(0,scheduler.getRootQueueMetrics().getAvailableVirtualCores()); }

    EqualityVerifier 
    /**
     * Verifies rootMetrics.getReservedMB(): a second 1024 MB request on a full
     * 1024 MB node creates a 1024 MB reservation; adding a second node and
     * updating it leaves the reservation in place (still 1024 MB); a further
     * update of the first node clears it back to 0.
     */
    @Test public void testSchedulerRootQueueMetrics() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(1024)); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(1024,"queue1","user1",1); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); createSchedulingRequest(1024,"queue2","user1",1); scheduler.update(); scheduler.handle(updateEvent); assertEquals(1024,scheduler.rootMetrics.getReservedMB()); RMNode node2=MockNodes.newNodeInfo(1,Resources.createResource(1024)); NodeAddedSchedulerEvent nodeEvent2=new NodeAddedSchedulerEvent(node2); NodeUpdateSchedulerEvent updateEvent2=new NodeUpdateSchedulerEvent(node2); scheduler.handle(nodeEvent2); scheduler.handle(updateEvent2); assertEquals(1024,scheduler.rootMetrics.getReservedMB()); scheduler.handle(updateEvent); assertEquals(0,scheduler.rootMetrics.getReservedMB()); }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Checks getLeafQueue() against parent/leaf name conflicts: once
     * "parent.child" exists, "parent" cannot become a leaf (null) and neither
     * can "parent.child.grandchild" (a child under an existing leaf, null),
     * while a sibling leaf "parent.sister" is created normally. Leaf-queue
     * counts are asserted after each step (2, 2, 2, then 3 — the other leaf
     * being the default queue).
     */
    @Test public void testHierarchicalQueuesSimilarParents() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueManager=scheduler.getQueueManager(); FSLeafQueue leafQueue=queueManager.getLeafQueue("parent.child",true); Assert.assertEquals(2,queueManager.getLeafQueues().size()); Assert.assertNotNull(leafQueue); Assert.assertEquals("root.parent.child",leafQueue.getName()); FSLeafQueue leafQueue2=queueManager.getLeafQueue("parent",true); Assert.assertNull(leafQueue2); Assert.assertEquals(2,queueManager.getLeafQueues().size()); FSLeafQueue leafQueue3=queueManager.getLeafQueue("parent.child.grandchild",true); Assert.assertNull(leafQueue3); Assert.assertEquals(2,queueManager.getLeafQueues().size()); FSLeafQueue leafQueue4=queueManager.getLeafQueue("parent.sister",true); Assert.assertNotNull(leafQueue4); Assert.assertEquals("root.parent.sister",leafQueue4.getName()); Assert.assertEquals(3,queueManager.getLeafQueues().size()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier IdentityVerifier EqualityVerifier HybridVerifier 
    /**
     * Moves a runnable app with one live container from queue1 to queue2 and
     * verifies the full bookkeeping transfer: the app's queue reference, the
     * runnable-app lists, resource usage (1024 MB / 1 vcore moves over),
     * runnable-app counts on both queues and on root, and — after update() —
     * the demand (3072 MB / 3 vcores for the 3 requested containers) follows
     * the app to the target queue.
     */
    @Test public void testMoveRunnableApp() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); QueueManager queueMgr=scheduler.getQueueManager(); FSLeafQueue oldQueue=queueMgr.getLeafQueue("queue1",true); FSLeafQueue targetQueue=queueMgr.getLeafQueue("queue2",true); ApplicationAttemptId appAttId=createSchedulingRequest(1024,1,"queue1","user1",3); ApplicationId appId=appAttId.getApplicationId(); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(1024)); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.handle(updateEvent); assertEquals(Resource.newInstance(1024,1),oldQueue.getResourceUsage()); scheduler.update(); assertEquals(Resource.newInstance(3072,3),oldQueue.getDemand()); scheduler.moveApplication(appId,"queue2"); FSAppAttempt app=scheduler.getSchedulerApp(appAttId); assertSame(targetQueue,app.getQueue()); assertFalse(oldQueue.getRunnableAppSchedulables().contains(app)); assertTrue(targetQueue.getRunnableAppSchedulables().contains(app)); assertEquals(Resource.newInstance(0,0),oldQueue.getResourceUsage()); assertEquals(Resource.newInstance(1024,1),targetQueue.getResourceUsage()); assertEquals(0,oldQueue.getNumRunnableApps()); assertEquals(1,targetQueue.getNumRunnableApps()); assertEquals(1,queueMgr.getRootQueue().getNumRunnableApps()); scheduler.update(); assertEquals(Resource.newInstance(0,0),oldQueue.getDemand()); assertEquals(Resource.newInstance(3072,3),targetQueue.getDemand()); }

    EqualityVerifier 
    /**
     * Cluster capacity must track node membership: 1024 MB after the first
     * node is added, 1536 MB after a second 512 MB node joins, and 512 MB
     * once the first node is removed again.
     */
    @Test
    public void testAggregateCapacityTracking() throws Exception {
      scheduler.init(conf);
      scheduler.start();
      scheduler.reinitialize(conf, resourceManager.getRMContext());

      // First node: 1024 MB.
      RMNode firstNode =
          MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
      scheduler.handle(new NodeAddedSchedulerEvent(firstNode));
      assertEquals(1024, scheduler.getClusterResource().getMemory());

      // Second node adds 512 MB.
      RMNode secondNode =
          MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2");
      scheduler.handle(new NodeAddedSchedulerEvent(secondNode));
      assertEquals(1536, scheduler.getClusterResource().getMemory());

      // Removing the first node leaves only the second node's 512 MB.
      scheduler.handle(new NodeRemovedSchedulerEvent(firstNode));
      assertEquals(512, scheduler.getClusterResource().getMemory());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the default queue maxAMShare: on an 8 GB node, queue1's 2 GB AM
     * container is admitted (AM usage 2048 MB) while queue2's identical AM is
     * rejected (0 live containers, 0 AM usage). Fair shares of the five queues
     * are asserted to be 0 before any app is submitted.
     *
     * NOTE(review): the allocation-file XML in the out.println("...") calls
     * appears to have been stripped during extraction (mostly empty strings),
     * and {@code List queues=Arrays.asList(...)} iterated as String is a raw
     * type whose {@code List<String>} parameter was likely stripped too. Left
     * byte-identical here.
     */
    @Test public void testQueueMaxAMShareDefault() throws Exception { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println(""); out.println(""); out.println("1.0"); out.println(""); out.println(""); out.println(""); out.println(""); out.println(""); out.println(""); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node=MockNodes.newNodeInfo(1,Resources.createResource(8192,20),0,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent=new NodeAddedSchedulerEvent(node); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node); scheduler.handle(nodeEvent); scheduler.update(); FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true); assertEquals("Queue queue1's fair share should be 0",0,queue1.getFairShare().getMemory()); FSLeafQueue queue2=scheduler.getQueueManager().getLeafQueue("queue2",true); assertEquals("Queue queue2's fair share should be 0",0,queue2.getFairShare().getMemory()); FSLeafQueue queue3=scheduler.getQueueManager().getLeafQueue("queue3",true); assertEquals("Queue queue3's fair share should be 0",0,queue3.getFairShare().getMemory()); FSLeafQueue queue4=scheduler.getQueueManager().getLeafQueue("queue4",true); assertEquals("Queue queue4's fair share should be 0",0,queue4.getFairShare().getMemory()); FSLeafQueue queue5=scheduler.getQueueManager().getLeafQueue("queue5",true); assertEquals("Queue queue5's fair share should be 0",0,queue5.getFairShare().getMemory()); List queues=Arrays.asList("root.default","root.queue3","root.queue4","root.queue5"); for ( String queue : queues) { createSchedulingRequest(1 * 1024,queue,"user1"); scheduler.update(); scheduler.handle(updateEvent); } Resource amResource1=Resource.newInstance(2048,1); int 
amPriority=RMAppAttemptImpl.AM_CONTAINER_PRIORITY.getPriority(); ApplicationAttemptId attId1=createAppAttemptId(1,1); createApplicationWithAMResource(attId1,"queue1","test1",amResource1); createSchedulingRequestExistingApplication(2048,1,amPriority,attId1); FSAppAttempt app1=scheduler.getSchedulerApp(attId1); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application1's AM requests 2048 MB memory",2048,app1.getAMResource().getMemory()); assertEquals("Application1's AM should be running",1,app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory",2048,queue1.getAmResourceUsage().getMemory()); ApplicationAttemptId attId2=createAppAttemptId(2,1); createApplicationWithAMResource(attId2,"queue2","test1",amResource1); createSchedulingRequestExistingApplication(2048,1,amPriority,attId2); FSAppAttempt app2=scheduler.getSchedulerApp(attId2); scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application2's AM requests 2048 MB memory",2048,app2.getAMResource().getMemory()); assertEquals("Application2's AM should not be running",0,app2.getLiveContainers().size()); assertEquals("Queue2's AM resource usage should be 0 MB memory",0,queue2.getAmResourceUsage().getMemory()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With zero minimum allocations configured and increments of 512 MB /
     * 2 vcores, the FairScheduler must report a 0/0 minimum capability and the
     * configured increment capability.
     */
    @Test
    public void testMinZeroResourcesSettings() throws IOException {
      YarnConfiguration schedulerConf = new YarnConfiguration();
      schedulerConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
      schedulerConf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 0);
      schedulerConf.setInt(
          FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512);
      schedulerConf.setInt(
          FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2);

      FairScheduler fs = new FairScheduler();
      fs.init(schedulerConf);
      fs.reinitialize(schedulerConf, null);

      Assert.assertEquals(0, fs.getMinimumResourceCapability().getMemory());
      Assert.assertEquals(0, fs.getMinimumResourceCapability().getVirtualCores());
      Assert.assertEquals(512, fs.getIncrementResourceCapability().getMemory());
      Assert.assertEquals(2, fs.getIncrementResourceCapability().getVirtualCores());
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Submitting an app with an empty queue name must be rejected: no new leaf
     * queue is created, no scheduler app exists for the attempt, and no RMApp
     * is registered in the RM context.
     */
    @Test public void testEmptyQueueName() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); assertEquals(1,scheduler.getQueueManager().getLeafQueues().size()); ApplicationAttemptId appAttemptId=createAppAttemptId(1,1); AppAddedSchedulerEvent appAddedEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"","user1"); scheduler.handle(appAddedEvent); assertEquals(1,scheduler.getQueueManager().getLeafQueues().size()); assertNull(scheduler.getSchedulerApp(appAttemptId)); assertEquals(0,resourceManager.getRMContext().getRMApps().size()); }

    APIUtilityVerifier EqualityVerifier 
    /**
     * With SimpleGroupsMapping configured, assignToQueue() for user1 on
     * root.default must land in the nested per-user queue
     * "root.user1group.user1".
     *
     * NOTE(review): the allocation-file XML written via out.println("...")
     * appears to have been stripped during extraction (empty strings); the
     * placement-rule configuration it was meant to write is missing. Left
     * byte-identical here.
     */
    @Test public void testNestedUserQueue() throws IOException { conf.set(FairSchedulerConfiguration.ALLOCATION_FILE,ALLOC_FILE); conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,SimpleGroupsMapping.class,GroupMappingServiceProvider.class); PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println(""); out.println(""); out.println(" "); out.println(""); out.println(""); out.println(""); out.println(""); out.close(); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMApp rmApp1=new MockRMApp(0,0,RMAppState.NEW); FSLeafQueue user1Leaf=scheduler.assignToQueue(rmApp1,"root.default","user1"); assertEquals("root.user1group.user1",user1Leaf.getName()); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * With FifoPolicy on queue1, the earlier-submitted app must receive all of
     * its requested containers (2) before the later app gets any: container
     * assignment across three node updates goes app1, app1, then app2.
     */
    @Test(timeout=5000) public void testFifoWithinQueue() throws Exception { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(3072,3),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); ApplicationAttemptId attId1=createSchedulingRequest(1024,"queue1","user1",2); ApplicationAttemptId attId2=createSchedulingRequest(1024,"queue1","user1",2); FSAppAttempt app1=scheduler.getSchedulerApp(attId1); FSAppAttempt app2=scheduler.getSchedulerApp(attId2); FSLeafQueue queue1=scheduler.getQueueManager().getLeafQueue("queue1",true); queue1.setPolicy(new FifoPolicy()); scheduler.update(); NodeUpdateSchedulerEvent updateEvent=new NodeUpdateSchedulerEvent(node1); scheduler.handle(updateEvent); assertEquals(1,app1.getLiveContainers().size()); assertEquals(0,app2.getLiveContainers().size()); scheduler.handle(updateEvent); assertEquals(2,app1.getLiveContainers().size()); assertEquals(0,app2.getLiveContainers().size()); scheduler.handle(updateEvent); assertEquals(2,app1.getLiveContainers().size()); assertEquals(1,app2.getLiveContainers().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With three active leaf queues — default at the top level, queue2 and
     * queue3 under "parent" — fair and steady fair shares must split half the
     * capacity to default and a quarter each to the two children of parent,
     * and the corresponding queue metrics must agree.
     *
     * NOTE(review): {@code capacity = 10 * 24} (240 MB) is much smaller than
     * the 10 * 1024 MB scheduling requests below; the shares asserted are
     * ratios of capacity so the test is self-consistent, but confirm the
     * constant against the upstream source.
     */
    @Test public void testSimpleHierarchicalFairShareCalculation() throws IOException { scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,resourceManager.getRMContext()); int capacity=10 * 24; RMNode node1=MockNodes.newNodeInfo(1,Resources.createResource(capacity),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); createSchedulingRequest(10 * 1024,"parent.queue2","user1"); createSchedulingRequest(10 * 1024,"parent.queue3","user1"); createSchedulingRequest(10 * 1024,"root.default","user1"); scheduler.update(); scheduler.getQueueManager().getRootQueue().setSteadyFairShare(scheduler.getClusterResource()); scheduler.getQueueManager().getRootQueue().recomputeSteadyShares(); QueueManager queueManager=scheduler.getQueueManager(); Collection queues=queueManager.getLeafQueues(); assertEquals(3,queues.size()); FSLeafQueue queue1=queueManager.getLeafQueue("default",true); FSLeafQueue queue2=queueManager.getLeafQueue("parent.queue2",true); FSLeafQueue queue3=queueManager.getLeafQueue("parent.queue3",true); assertEquals(capacity / 2,queue1.getFairShare().getMemory()); assertEquals(capacity / 2,queue1.getMetrics().getFairShareMB()); assertEquals(capacity / 2,queue1.getSteadyFairShare().getMemory()); assertEquals(capacity / 2,queue1.getMetrics().getSteadyFairShareMB()); assertEquals(capacity / 4,queue2.getFairShare().getMemory()); assertEquals(capacity / 4,queue2.getMetrics().getFairShareMB()); assertEquals(capacity / 4,queue2.getSteadyFairShare().getMemory()); assertEquals(capacity / 4,queue2.getMetrics().getSteadyFairShareMB()); assertEquals(capacity / 4,queue3.getFairShare().getMemory()); assertEquals(capacity / 4,queue3.getMetrics().getFairShareMB()); assertEquals(capacity / 4,queue3.getSteadyFairShare().getMemory()); assertEquals(capacity / 4,queue3.getMetrics().getSteadyFairShareMB()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestFairSchedulerConfiguration

    EqualityVerifier 
    /**
     * parseResourceConfigValue must accept memory and vcores in either order,
     * with or without whitespace, always yielding (1024 MB, 2 vcores).
     */
    @Test
    public void testParseResourceConfigValue() throws Exception {
      // Same four input forms as before, driven from a table.
      String[] forms = {
          "2 vcores, 1024 mb",
          "1024 mb, 2 vcores",
          "2vcores,1024mb",
          "1024mb,2vcores",
      };
      for (String form : forms) {
        assertEquals(BuilderUtils.newResource(1024, 2), parseResourceConfigValue(form));
      }
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestFairSchedulerFairShare

    IterativeVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Under DRF, with active apps in parentA.childA1/childA2, parentB.childB1
     * and default, each parentA child must get ~40% of both memory and vcores
     * and parentB.childB1 ~10%; steady fair shares are then checked per-leaf
     * (0.2 of each resource under parentA; 0.05 memory / 0.1 vcores under
     * parentB).
     *
     * NOTE(review): {@code Collection leafQueues} iterated with an FSLeafQueue
     * element is a raw type — the {@code Collection<FSLeafQueue>} parameter
     * was likely stripped during extraction. Left byte-identical here.
     */
    @Test public void testFairShareWithDRFMultipleActiveQueuesUnderDifferentParent() throws IOException { int nodeMem=16 * 1024; int nodeVCores=10; createClusterWithQueuesAndOneNode(nodeMem,nodeVCores,"drf"); createSchedulingRequest(2 * 1024,"root.parentA.childA1","user1"); createSchedulingRequest(3 * 1024,"root.parentA.childA2","user2"); createSchedulingRequest(1 * 1024,"root.parentB.childB1","user3"); createSchedulingRequest(1 * 1024,"root.default","user4"); scheduler.update(); for (int i=1; i <= 2; i++) { assertEquals(40,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA" + i,false).getFairShare().getMemory() / nodeMem * 100,.9); assertEquals(40,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA" + i,false).getFairShare().getVirtualCores() / nodeVCores * 100,.9); } assertEquals(10,(double)scheduler.getQueueManager().getLeafQueue("root.parentB.childB1",false).getFairShare().getMemory() / nodeMem * 100,.9); assertEquals(10,(double)scheduler.getQueueManager().getLeafQueue("root.parentB.childB1",false).getFairShare().getVirtualCores() / nodeVCores * 100,.9); Collection leafQueues=scheduler.getQueueManager().getLeafQueues(); for ( FSLeafQueue leaf : leafQueues) { if (leaf.getName().startsWith("root.parentA")) { assertEquals(0.2,(double)leaf.getSteadyFairShare().getMemory() / nodeMem,0.001); assertEquals(0.2,(double)leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,0.001); } else if (leaf.getName().startsWith("root.parentB")) { assertEquals(0.05,(double)leaf.getSteadyFairShare().getMemory() / nodeMem,0.001); assertEquals(0.1,(double)leaf.getSteadyFairShare().getVirtualCores() / nodeVCores,0.001); } } }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * With no apps running, every leaf queue under parentA and parentB has a
     * zero instantaneous fair share, while steady fair shares are still
     * distributed per configuration (checked by verifySteadyFairShareMemory).
     *
     * NOTE(review): the original declared {@code Collection leafQueues} as a
     * raw type and iterated it with an {@code FSLeafQueue} element, which does
     * not compile; the stripped generic parameter is restored here as
     * {@code Collection<FSLeafQueue>}. The two original branches asserted the
     * identical condition, so they are merged into one.
     */
    @Test
    public void testFairShareNoAppsRunning() throws IOException {
      int nodeCapacity = 16 * 1024;
      createClusterWithQueuesAndOneNode(nodeCapacity, "fair");
      scheduler.update();
      Collection<FSLeafQueue> leafQueues = scheduler.getQueueManager().getLeafQueues();
      for (FSLeafQueue leaf : leafQueues) {
        String name = leaf.getName();
        // No demand anywhere, so the instantaneous fair share must be zero.
        if (name.startsWith("root.parentA") || name.startsWith("root.parentB")) {
          assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity, 0);
        }
      }
      verifySteadyFairShareMemory(leafQueues, nodeCapacity);
    }

    IterativeVerifier EqualityVerifier 
    /**
     * Three active sibling queues under parentA must each receive ~33% of the
     * node's memory as their fair share; steady shares are verified via
     * verifySteadyFairShareMemory().
     */
    @Test public void testFairShareMultipleActiveQueuesUnderSameParent() throws IOException { int nodeCapacity=16 * 1024; createClusterWithQueuesAndOneNode(nodeCapacity,"fair"); createSchedulingRequest(2 * 1024,"root.parentA.childA1","user1"); createSchedulingRequest(2 * 1024,"root.parentA.childA2","user2"); createSchedulingRequest(2 * 1024,"root.parentA.childA3","user3"); scheduler.update(); for (int i=1; i <= 3; i++) { assertEquals(33,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA" + i,false).getFairShare().getMemory() / nodeCapacity * 100,.9); } verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),nodeCapacity); }

    EqualityVerifier 
    /**
     * With a single active app in parentA.childA1, that queue's fair share is
     * ~100% of the node while its idle sibling childA2 gets 0; steady shares
     * are verified via verifySteadyFairShareMemory().
     */
    @Test public void testFairShareOneAppRunning() throws IOException { int nodeCapacity=16 * 1024; createClusterWithQueuesAndOneNode(nodeCapacity,"fair"); createSchedulingRequest(2 * 1024,"root.parentA.childA1","user1"); scheduler.update(); assertEquals(100,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA1",false).getFairShare().getMemory() / nodeCapacity * 100,0.1); assertEquals(0,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA2",false).getFairShare().getMemory() / nodeCapacity,0.1); verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),nodeCapacity); }

    IterativeVerifier EqualityVerifier 
    /**
     * Memory-only (fair policy) variant of the DRF test above: the two active
     * parentA children each get ~40% of the node's memory and
     * parentB.childB1 ~10%; steady shares are verified via
     * verifySteadyFairShareMemory().
     */
    @Test public void testFairShareMultipleActiveQueuesUnderDifferentParent() throws IOException { int nodeCapacity=16 * 1024; createClusterWithQueuesAndOneNode(nodeCapacity,"fair"); createSchedulingRequest(2 * 1024,"root.parentA.childA1","user1"); createSchedulingRequest(3 * 1024,"root.parentA.childA2","user2"); createSchedulingRequest(1 * 1024,"root.parentB.childB1","user3"); createSchedulingRequest(1 * 1024,"root.default","user4"); scheduler.update(); for (int i=1; i <= 2; i++) { assertEquals(40,(double)scheduler.getQueueManager().getLeafQueue("root.parentA.childA" + i,false).getFairShare().getMemory() / nodeCapacity * 100,.9); } assertEquals(10,(double)scheduler.getQueueManager().getLeafQueue("root.parentB.childB1",false).getFairShare().getMemory() / nodeCapacity * 100,.9); verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(),nodeCapacity); }

    IterativeVerifier EqualityVerifier 
    /**
     * After two apps run in sibling queues (each at ~50% fair share), removing
     * the first app must reset its queue's fair share to zero and hand the
     * full share (~100%) to the remaining queue; steady shares are verified
     * via verifySteadyFairShareMemory().
     */
    @Test
    public void testFairShareResetsToZeroWhenAppsComplete() throws IOException {
      int nodeCapacity = 16 * 1024;
      createClusterWithQueuesAndOneNode(nodeCapacity, "fair");
      ApplicationAttemptId app1 =
          createSchedulingRequest(2 * 1024, "root.parentA.childA1", "user1");
      // Submitted for its scheduling side effect only; the original bound the
      // returned attempt id to an unused local (app2), dropped here.
      createSchedulingRequest(3 * 1024, "root.parentA.childA2", "user2");
      scheduler.update();
      // Both active queues split the cluster ~evenly.
      for (int i = 1; i <= 2; i++) {
        assertEquals(50, (double) scheduler.getQueueManager()
            .getLeafQueue("root.parentA.childA" + i, false)
            .getFairShare().getMemory() / nodeCapacity * 100, .9);
      }
      AppAttemptRemovedSchedulerEvent appRemovedEvent1 =
          new AppAttemptRemovedSchedulerEvent(app1, RMAppAttemptState.FINISHED, false);
      scheduler.handle(appRemovedEvent1);
      scheduler.update();
      // The finished app's queue drops to zero; the survivor takes it all.
      assertEquals(0, (double) scheduler.getQueueManager()
          .getLeafQueue("root.parentA.childA1", false)
          .getFairShare().getMemory() / nodeCapacity * 100, 0);
      assertEquals(100, (double) scheduler.getQueueManager()
          .getLeafQueue("root.parentA.childA2", false)
          .getFairShare().getMemory() / nodeCapacity * 100, 0.1);
      verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(), nodeCapacity);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestFairSchedulerPreemption

    EqualityVerifier 
    /**
     * Verifies preemption gating by the cluster utilization threshold: at
     * threshold 0 preemption fires (1024 MB preempted); at 0.8 with the same
     * load it does not (lastPreemptMemory stays -1); at 0.7 it fires again.
     * StubbedFairScheduler records the last preempted memory instead of
     * actually preempting.
     *
     * NOTE(review): the allocation-file XML written via out.println/print
     * appears to have been stripped during extraction (empty strings and bare
     * values like "1", "1024mb,0vcores", "5", "10"); restore from the original
     * test resource before relying on this test. Left byte-identical here.
     */
    @Test public void testPreemptionWithFreeResources() throws Exception { PrintWriter out=new PrintWriter(new FileWriter(ALLOC_FILE)); out.println(""); out.println(""); out.println(""); out.println("0mb,0vcores"); out.println(""); out.println(""); out.println("1"); out.println("1024mb,0vcores"); out.println(""); out.println(""); out.println("1"); out.println("1024mb,0vcores"); out.println(""); out.print("5"); out.print("10"); out.println(""); out.close(); startResourceManager(0f); registerNodeAndSubmitApp(4 * 1024,4,2,1024); createSchedulingRequest(1024,"queueB","user1",1,1); scheduler.update(); clock.tick(6); ((StubbedFairScheduler)scheduler).resetLastPreemptResources(); scheduler.preemptTasksIfNecessary(); assertEquals("preemptResources() should have been called",1024,((StubbedFairScheduler)scheduler).lastPreemptMemory); resourceManager.stop(); startResourceManager(0.8f); registerNodeAndSubmitApp(4 * 1024,4,3,1024); createSchedulingRequest(1024,"queueB","user1",1,1); scheduler.update(); clock.tick(6); ((StubbedFairScheduler)scheduler).resetLastPreemptResources(); scheduler.preemptTasksIfNecessary(); assertEquals("preemptResources() should not have been called",-1,((StubbedFairScheduler)scheduler).lastPreemptMemory); resourceManager.stop(); startResourceManager(0.7f); registerNodeAndSubmitApp(4 * 1024,4,3,1024); createSchedulingRequest(1024,"queueB","user1",1,1); scheduler.update(); clock.tick(6); ((StubbedFairScheduler)scheduler).resetLastPreemptResources(); scheduler.preemptTasksIfNecessary(); assertEquals("preemptResources() should have been called",1024,((StubbedFairScheduler)scheduler).lastPreemptMemory); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestMaxRunningAppsEnforcer

    EqualityVerifier 
    @Test public void testMultiListStartTimeIteratorEmptyAppLists(){ List> lists=new ArrayList>(); lists.add(Arrays.asList(mockAppAttempt(1))); lists.add(Arrays.asList(mockAppAttempt(2))); Iterator iter=new MaxRunningAppsEnforcer.MultiListStartTimeIterator(lists); assertEquals(1,iter.next().getStartTime()); assertEquals(2,iter.next().getStartTime()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With root capped at 2 apps and each leaf at 1, removing the runnable app
     * from queue1 must NOT promote either non-runnable app waiting in queue2
     * (queue2's own cap of 1 is already met).
     */
    @Test public void testRemoveDoesNotEnableAnyApp(){ FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1",true); FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue2",true); queueMaxApps.put("root",2); queueMaxApps.put("root.queue1",1); queueMaxApps.put("root.queue2",1); FSAppAttempt app1=addApp(leaf1,"user"); addApp(leaf2,"user"); addApp(leaf2,"user"); assertEquals(1,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getNonRunnableAppSchedulables().size()); removeApp(app1); assertEquals(0,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getNonRunnableAppSchedulables().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With root.queue1 capped at 2 apps, removing the runnable app from one
     * grandchild leaf must promote the waiting (non-runnable) app on the
     * cousin leaf under the same capped ancestor.
     */
    @Test public void testRemoveEnablesAppOnCousinQueue(){ FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.subqueue1.leaf1",true); FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.subqueue2.leaf2",true); queueMaxApps.put("root.queue1",2); FSAppAttempt app1=addApp(leaf1,"user"); addApp(leaf2,"user"); addApp(leaf2,"user"); assertEquals(1,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getNonRunnableAppSchedulables().size()); removeApp(app1); assertEquals(0,leaf1.getRunnableAppSchedulables().size()); assertEquals(2,leaf2.getRunnableAppSchedulables().size()); assertEquals(0,leaf2.getNonRunnableAppSchedulables().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * Removing user1's app frees two distinct limits at once — leaf1's
     * queue cap (2) and user1's per-user cap (1) — so one app waiting on
     * leaf1 (by queue) and one waiting on leaf2 (user1's, by user) both
     * become runnable.
     */
    @Test public void testRemoveEnablesOneByQueueOneByUser(){ FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.leaf1",true); FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.leaf2",true); queueMaxApps.put("root.queue1.leaf1",2); userMaxApps.put("user1",1); FSAppAttempt app1=addApp(leaf1,"user1"); addApp(leaf1,"user2"); addApp(leaf1,"user3"); addApp(leaf2,"user1"); assertEquals(2,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf1.getNonRunnableAppSchedulables().size()); assertEquals(1,leaf2.getNonRunnableAppSchedulables().size()); removeApp(app1); assertEquals(2,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getRunnableAppSchedulables().size()); assertEquals(0,leaf1.getNonRunnableAppSchedulables().size()); assertEquals(0,leaf2.getNonRunnableAppSchedulables().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * With root.queue1 capped at 2 runnable apps and two apps waiting on
     * leaf2, removing the app on leaf1 frees exactly one slot: only one
     * waiting app is promoted, the other stays non-runnable.
     */
    @Test public void testMultipleAppsWaitingOnCousinQueue(){ FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.subqueue1.leaf1",true); FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.subqueue2.leaf2",true); queueMaxApps.put("root.queue1",2); FSAppAttempt app1=addApp(leaf1,"user"); addApp(leaf2,"user"); addApp(leaf2,"user"); addApp(leaf2,"user"); assertEquals(1,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getRunnableAppSchedulables().size()); assertEquals(2,leaf2.getNonRunnableAppSchedulables().size()); removeApp(app1); assertEquals(0,leaf1.getRunnableAppSchedulables().size()); assertEquals(2,leaf2.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getNonRunnableAppSchedulables().size()); }

    InternalCallVerifier EqualityVerifier 
    /**
     * When a removal frees one slot under root.queue1 (max 2), the waiting
     * app with the earliest start time wins: the leaf2 app submitted before
     * the 20-tick clock advance is promoted, while leaf1's later app keeps
     * waiting.
     */
    @Test public void testRemoveEnablingOrderedByStartTime(){ FSLeafQueue leaf1=queueManager.getLeafQueue("root.queue1.subqueue1.leaf1",true); FSLeafQueue leaf2=queueManager.getLeafQueue("root.queue1.subqueue2.leaf2",true); queueMaxApps.put("root.queue1",2); FSAppAttempt app1=addApp(leaf1,"user"); addApp(leaf2,"user"); addApp(leaf2,"user"); clock.tick(20); addApp(leaf1,"user"); assertEquals(1,leaf1.getRunnableAppSchedulables().size()); assertEquals(1,leaf2.getRunnableAppSchedulables().size()); assertEquals(1,leaf1.getNonRunnableAppSchedulables().size()); assertEquals(1,leaf2.getNonRunnableAppSchedulables().size()); removeApp(app1); assertEquals(0,leaf1.getRunnableAppSchedulables().size()); assertEquals(2,leaf2.getRunnableAppSchedulables().size()); assertEquals(0,leaf2.getNonRunnableAppSchedulables().size()); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.TestQueuePlacementPolicy

    APIUtilityVerifier EqualityVerifier 
    /**
     * Nested user-queue placement keyed on a secondary group: falls back to
     * root.default until "root.user1subgroup1" exists as a parent queue, then
     * places user1 under it.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testNestedUserQueueSecondaryGroup() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.default",policy.assignAppToQueue("root.default","user1")); configuredQueues.get(FSQueueType.PARENT).add("root.user1subgroup1"); policy=parse(sb.toString()); assertEquals("root.user1subgroup1.user1",policy.assignAppToQueue("root.default","user1")); }

    APIUtilityVerifier EqualityVerifier 
    /**
     * A default rule carrying an explicit queue attribute routes everything
     * to that queue: user1 lands on the pre-configured root.someDefaultQueue.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testDefaultRuleWithQueueAttribute() throws Exception { configuredQueues.get(FSQueueType.LEAF).add("root.someDefaultQueue"); StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.someDefaultQueue",policy.assignAppToQueue("root.default","user1")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Specified-then-reject policy: an explicitly requested queue is honored,
     * and any app without one is rejected (assignment returns null).
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testSpecifiedThenReject() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","someuser")); assertEquals(null,policy.assignAppToQueue("default","someuser")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Nested primary-group rule with create=false: placement falls back to
     * root.default until the group parent queue (first half) or both the
     * parent and the user leaf (second half) pre-exist; only then is the user
     * placed under the group queue. Note the user2 default-assertion runs
     * against the old policy on purpose, before the rebuilt XML is parsed.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testNestedUserQueuePrimaryGroupNoCreate() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.default",policy.assignAppToQueue("root.default","user1")); configuredQueues.get(FSQueueType.PARENT).add("root.user1group"); policy=parse(sb.toString()); assertEquals("root.user1group.user1",policy.assignAppToQueue("root.default","user1")); sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); assertEquals("root.default",policy.assignAppToQueue("root.default","user2")); configuredQueues.get(FSQueueType.PARENT).add("root.user2group"); configuredQueues.get(FSQueueType.LEAF).add("root.user2group.user2"); policy=parse(sb.toString()); assertEquals("root.user2group.user2",policy.assignAppToQueue("root.default","user2")); }

    APIUtilityVerifier EqualityVerifier 
    /**
     * A user rule nested inside a default rule places the user under the
     * pre-configured parent queue: user1 goes to root.parentq.user1.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testNestedUserQueueDefaultRule() throws Exception { configuredQueues.get(FSQueueType.PARENT).add("root.parentq"); StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.parentq.user1",policy.assignAppToQueue("root.default","user1")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * create=false semantics: a specified queue is always honored, the user
     * rule only matches when the user's queue (root.someuser) pre-exists, and
     * users without a pre-existing queue fall through to root.default.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testNoCreate() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); configuredQueues.get(FSQueueType.LEAF).add("root.someuser"); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","someuser")); assertEquals("root.someuser",policy.assignAppToQueue("default","someuser")); assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","otheruser")); assertEquals("root.default",policy.assignAppToQueue("default","otheruser")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A nested user rule keyed on the requested (specified) parent queue:
     * each user is placed under the parent they asked for, e.g.
     * root.parent1.user1 and root.parent2.user2.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testNestedUserQueueSpecificRule() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); configuredQueues.get(FSQueueType.PARENT).add("root.parent1"); configuredQueues.get(FSQueueType.PARENT).add("root.parent2"); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.parent1.user1",policy.assignAppToQueue("root.parent1","user1")); assertEquals("root.parent2.user2",policy.assignAppToQueue("root.parent2","user2")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Specified-then-user policy: an explicit queue request wins; otherwise
     * each user gets a queue named after them (root.someuser, root.otheruser).
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testSpecifiedUserPolicy() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.specifiedq",policy.assignAppToQueue("specifiedq","someuser")); assertEquals("root.someuser",policy.assignAppToQueue("default","someuser")); assertEquals("root.otheruser",policy.assignAppToQueue("default","otheruser")); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Nested primary-group rule with creation allowed: user1 lands under
     * root.user1group.user1, an explicitly specified existing leaf is honored
     * for user2, and user3 falls back to root.default because
     * "root.user3group" exists only as a LEAF and so cannot host a nested
     * user queue.
     * NOTE(review): the empty sb.append("") fragments look like placement-policy
     * XML stripped during extraction — confirm against the original test source.
     */
    @Test public void testNestedUserQueuePrimaryGroup() throws Exception { StringBuffer sb=new StringBuffer(); sb.append(""); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(" "); sb.append(""); QueuePlacementPolicy policy=parse(sb.toString()); assertEquals("root.user1group.user1",policy.assignAppToQueue("root.default","user1")); configuredQueues.get(FSQueueType.LEAF).add("root.specifiedq"); assertEquals("root.specifiedq",policy.assignAppToQueue("root.specifiedq","user2")); configuredQueues.get(FSQueueType.LEAF).add("root.user3group"); assertEquals("root.default",policy.assignAppToQueue("root.default","user3")); }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.TestDominantResourceFairnessPolicy

    InternalCallVerifier EqualityVerifier 
    /**
     * calculateShares must report each resource's usage fraction of cluster
     * capacity (memory 10/100 = .1, CPU 5/10 = .5) and order the resource
     * types by dominance, CPU first.
     */
    @Test public void testCalculateShares(){
      Resource consumed = Resources.createResource(10, 5);
      Resource clusterCapacity = Resources.createResource(100, 10);
      ResourceType[] dominanceOrder = new ResourceType[2];
      ResourceWeights shares = new ResourceWeights();
      DominantResourceFairnessPolicy.DominantResourceFairnessComparator comparator =
          new DominantResourceFairnessPolicy.DominantResourceFairnessComparator();
      comparator.calculateShares(consumed, clusterCapacity, shares, dominanceOrder,
          ResourceWeights.NEUTRAL);
      assertEquals(.1, shares.getWeight(ResourceType.MEMORY), .00001);
      assertEquals(.5, shares.getWeight(ResourceType.CPU), .00001);
      assertEquals(ResourceType.CPU, dominanceOrder[0]);
      assertEquals(ResourceType.MEMORY, dominanceOrder[1]);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.TestFifoScheduler

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Apps submitted without an explicit queue land in "default", and asking
     * for an unknown queue yields null rather than an empty list.
     */
    @Test public void testGetAppsInQueue() throws Exception {
      Application app0 = new Application("user_0", resourceManager);
      app0.submit();
      Application app1 = new Application("user_0", resourceManager);
      app1.submit();
      ResourceScheduler scheduler = resourceManager.getResourceScheduler();
      List defaultQueueApps = scheduler.getAppsInQueue("default");
      assertTrue(defaultQueueApps.contains(app0.getApplicationAttemptId()));
      assertTrue(defaultQueueApps.contains(app1.getApplicationAttemptId()));
      assertEquals(2, defaultQueueApps.size());
      Assert.assertNull(scheduler.getAppsInQueue("someotherqueue"));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Two attempts of the same application must count as a single submitted
     * app in the root queue metrics: appsSubmitted grows by exactly 1 even
     * after a second AppAttemptAddedSchedulerEvent for the same app id.
     */
    @Test(timeout=5000) public void testAppAttemptMetrics() throws Exception { AsyncDispatcher dispatcher=new InlineDispatcher(); RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class); RMContext rmContext=new RMContextImpl(dispatcher,null,null,null,null,null,null,null,null,writer); FifoScheduler scheduler=new FifoScheduler(); Configuration conf=new Configuration(); scheduler.setRMContext(rmContext); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(conf,rmContext); QueueMetrics metrics=scheduler.getRootQueueMetrics(); int beforeAppsSubmitted=metrics.getAppsSubmitted(); ApplicationId appId=BuilderUtils.newApplicationId(200,1); ApplicationAttemptId appAttemptId=BuilderUtils.newApplicationAttemptId(appId,1); SchedulerEvent appEvent=new AppAddedSchedulerEvent(appId,"queue","user"); scheduler.handle(appEvent); SchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false); scheduler.handle(attemptEvent); appAttemptId=BuilderUtils.newApplicationAttemptId(appId,2); SchedulerEvent attemptEvent2=new AppAttemptAddedSchedulerEvent(appAttemptId,false); scheduler.handle(attemptEvent2); int afterAppsSubmitted=metrics.getAppsSubmitted(); Assert.assertEquals(1,afterAppsSubmitted - beforeAppsSubmitted); scheduler.stop(); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Shrinking a node's total resource via setResourceOption (2048MB ->
     * 1024MB) must only reach the scheduler after the next NODE_UPDATE event:
     * the scheduler still reports 2048MB available before the update and
     * 1024MB after. A subsequent 1024MB node-local allocation then fills the
     * node, driving queue capacity from 0.0 to 1.0. The private getNodes()
     * map of the anonymous FifoScheduler subclass is read via reflection.
     */
    @Test(timeout=2000) public void testUpdateResourceOnNode() throws Exception { AsyncDispatcher dispatcher=new InlineDispatcher(); Configuration conf=new Configuration(); RMContainerTokenSecretManager containerTokenSecretManager=new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); NMTokenSecretManagerInRM nmTokenSecretManager=new NMTokenSecretManagerInRM(conf); nmTokenSecretManager.rollMasterKey(); RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class); RMContext rmContext=new RMContextImpl(dispatcher,null,null,null,null,null,containerTokenSecretManager,nmTokenSecretManager,null,writer); FifoScheduler scheduler=new FifoScheduler(){ @SuppressWarnings("unused") public Map getNodes(){ return nodes; } } ; scheduler.setRMContext(rmContext); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(new Configuration(),rmContext); RMNode node0=MockNodes.newNodeInfo(1,Resources.createResource(2048,4),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node0); scheduler.handle(nodeEvent1); Method method=scheduler.getClass().getDeclaredMethod("getNodes"); @SuppressWarnings("unchecked") Map schedulerNodes=(Map)method.invoke(scheduler); assertEquals(schedulerNodes.values().size(),1); node0.setResourceOption(ResourceOption.newInstance(Resources.createResource(1024,4),RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)); assertEquals(node0.getTotalCapability().getMemory(),1024); assertEquals(schedulerNodes.get(node0.getNodeID()).getAvailableResource().getMemory(),2048); NodeUpdateSchedulerEvent node0Update=new NodeUpdateSchedulerEvent(node0); scheduler.handle(node0Update); assertEquals(schedulerNodes.get(node0.getNodeID()).getAvailableResource().getMemory(),1024); QueueInfo queueInfo=scheduler.getQueueInfo(null,false,false); Assert.assertEquals(0.0f,queueInfo.getCurrentCapacity(),0.0f); int _appId=1; int _appAttemptId=1; ApplicationAttemptId appAttemptId=createAppAttemptId(_appId,_appAttemptId); 
AppAddedSchedulerEvent appEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"queue1","user1"); scheduler.handle(appEvent); AppAttemptAddedSchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false); scheduler.handle(attemptEvent); int memory=1024; int priority=1; List ask=new ArrayList(); ResourceRequest nodeLocal=createResourceRequest(memory,node0.getHostName(),priority,1); ResourceRequest rackLocal=createResourceRequest(memory,node0.getRackName(),priority,1); ResourceRequest any=createResourceRequest(memory,ResourceRequest.ANY,priority,1); ask.add(nodeLocal); ask.add(rackLocal); ask.add(any); scheduler.allocate(appAttemptId,ask,new ArrayList(),null,null); Assert.assertEquals(1,nodeLocal.getNumContainers()); scheduler.handle(node0Update); Assert.assertEquals(0,nodeLocal.getNumContainers()); SchedulerAppReport info=scheduler.getSchedulerAppInfo(appAttemptId); Assert.assertEquals(1,info.getLiveContainers().size()); queueInfo=scheduler.getQueueInfo(null,false,false); Assert.assertEquals(1.0f,queueInfo.getCurrentCapacity(),0.0f); }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Three node-local container requests against a single 64GB node must
     * all be satisfied in one node heartbeat: the pending node-local ask
     * drains from 3 to 0 and the app reports three live containers.
     */
    @Test(timeout=2000) public void testNodeLocalAssignment() throws Exception { AsyncDispatcher dispatcher=new InlineDispatcher(); Configuration conf=new Configuration(); RMContainerTokenSecretManager containerTokenSecretManager=new RMContainerTokenSecretManager(conf); containerTokenSecretManager.rollMasterKey(); NMTokenSecretManagerInRM nmTokenSecretManager=new NMTokenSecretManagerInRM(conf); nmTokenSecretManager.rollMasterKey(); RMApplicationHistoryWriter writer=mock(RMApplicationHistoryWriter.class); RMContext rmContext=new RMContextImpl(dispatcher,null,null,null,null,null,containerTokenSecretManager,nmTokenSecretManager,null,writer); FifoScheduler scheduler=new FifoScheduler(); scheduler.setRMContext(rmContext); scheduler.init(conf); scheduler.start(); scheduler.reinitialize(new Configuration(),rmContext); RMNode node0=MockNodes.newNodeInfo(1,Resources.createResource(1024 * 64),1,"127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1=new NodeAddedSchedulerEvent(node0); scheduler.handle(nodeEvent1); int _appId=1; int _appAttemptId=1; ApplicationAttemptId appAttemptId=createAppAttemptId(_appId,_appAttemptId); AppAddedSchedulerEvent appEvent=new AppAddedSchedulerEvent(appAttemptId.getApplicationId(),"queue1","user1"); scheduler.handle(appEvent); AppAttemptAddedSchedulerEvent attemptEvent=new AppAttemptAddedSchedulerEvent(appAttemptId,false); scheduler.handle(attemptEvent); int memory=64; int nConts=3; int priority=20; List ask=new ArrayList(); ResourceRequest nodeLocal=createResourceRequest(memory,node0.getHostName(),priority,nConts); ResourceRequest rackLocal=createResourceRequest(memory,node0.getRackName(),priority,nConts); ResourceRequest any=createResourceRequest(memory,ResourceRequest.ANY,priority,nConts); ask.add(nodeLocal); ask.add(rackLocal); ask.add(any); scheduler.allocate(appAttemptId,ask,new ArrayList(),null,null); NodeUpdateSchedulerEvent node0Update=new NodeUpdateSchedulerEvent(node0); Assert.assertEquals(3,nodeLocal.getNumContainers()); 
scheduler.handle(node0Update); Assert.assertEquals(0,nodeLocal.getNumContainers()); SchedulerAppReport info=scheduler.getSchedulerAppInfo(appAttemptId); Assert.assertEquals(3,info.getLiveContainers().size()); scheduler.stop(); }

    InternalCallVerifier EqualityVerifier 
    /** With no NodeManagers registered, the FIFO queue reports zero current capacity. */
    @Test(timeout=5000) public void testFifoSchedulerCapacityWhenNoNMs(){
      FifoScheduler fifo = new FifoScheduler();
      QueueInfo rootQueueInfo = fifo.getQueueInfo(null, false, false);
      Assert.assertEquals(0.0f, rootQueueInfo.getCurrentCapacity(), 0.0f);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.security.TestAMRMTokens

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * A fresh AMRM token must be handed to the AM exactly once per master-key
     * roll: no token before the roll, a token carrying the new key id on the
     * first allocate after the roll, and no token on later allocates or after
     * the next key is activated.
     */
    @Test(timeout=20000) public void testAMRMMasterKeysUpdate() throws Exception {
      MockRM rm = new MockRM(conf) {
        @Override
        protected void doSecureLogin() throws IOException {
          // Skip the secure login step in the test environment.
        }
      };
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 8000);
      RMApp app = rm.submitApp(200);
      MockAM am = MockRM.launchAndRegisterAM(app, rm, nm);
      // No key roll yet, so no token should be delivered.
      AllocateResponse response = am.allocate(Records.newRecord(AllocateRequest.class));
      Assert.assertNull(response.getAMRMToken());
      // Roll the master key; the very next allocate must carry a new token.
      rm.getRMContext().getAMRMTokenSecretManager().rollMasterKey();
      response = am.allocate(Records.newRecord(AllocateRequest.class));
      Assert.assertNotNull(response.getAMRMToken());
      Token amrmToken = ConverterUtils.convertFromYarn(response.getAMRMToken(),
          new Text(response.getAMRMToken().getService()));
      // Fix: JUnit expects (expected, actual) — the current master key id is
      // the expected value, the decoded token's key id is the actual one.
      Assert.assertEquals(
          rm.getRMContext().getAMRMTokenSecretManager().getMasterKey().getMasterKey().getKeyId(),
          amrmToken.decodeIdentifier().getKeyId());
      // The token is delivered only once per roll.
      response = am.allocate(Records.newRecord(AllocateRequest.class));
      Assert.assertNull(response.getAMRMToken());
      // Activating the rolled key must not re-send a token either.
      rm.getRMContext().getAMRMTokenSecretManager().activateNextMasterKey();
      response = am.allocate(Records.newRecord(AllocateRequest.class));
      Assert.assertNull(response.getAMRMToken());
      rm.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer

    APIUtilityVerifier UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): timing-based test — renewal counts are polled with up to
    // 10 retries of 3s sleeps to reduce flakiness; the 60s timeout bounds it.
    /** * Basic idea of the test: * 1. create tokens. * 2. Mark one of them to be renewed in 2 seconds (instead of * 24 hours) * 3. register them for renewal * 4. sleep for 3 seconds * 5. count number of renewals (should 3 initial ones + one extra) * 6. register another token for 2 seconds * 7. cancel it immediately * 8. Sleep and check that the 2 seconds renew didn't happen * (totally 5 renewals) * 9. check cancellation * @throws IOException * @throws URISyntaxException */ @Test(timeout=60000) public void testDTRenewal() throws Exception { MyFS dfs=(MyFS)FileSystem.get(conf); LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode()); MyToken token1, token2, token3; token1=dfs.getDelegationToken("user1"); token2=dfs.getDelegationToken("user2"); token3=dfs.getDelegationToken("user3"); Renewer.tokenToRenewIn2Sec=token1; LOG.info("token=" + token1 + " should be renewed for 2 secs"); String nn1=DelegationTokenRenewer.SCHEME + "://host1:0"; String nn2=DelegationTokenRenewer.SCHEME + "://host2:0"; String nn3=DelegationTokenRenewer.SCHEME + "://host3:0"; Credentials ts=new Credentials(); ts.addToken(new Text(nn1),token1); ts.addToken(new Text(nn2),token2); ts.addToken(new Text(nn3),token3); ApplicationId applicationId_0=BuilderUtils.newApplicationId(0,0); delegationTokenRenewer.addApplicationAsync(applicationId_0,ts,true); waitForEventsToGetProcessed(delegationTokenRenewer); int numberOfExpectedRenewals=3 + 1; int attempts=10; while (attempts-- > 0) { try { Thread.sleep(3 * 1000); } catch ( InterruptedException e) { } if (Renewer.counter == numberOfExpectedRenewals) break; } LOG.info("dfs=" + dfs.hashCode() + ";Counter = "+ Renewer.counter+ ";t="+ Renewer.lastRenewed); assertEquals("renew wasn't called as many times as expected(4):",numberOfExpectedRenewals,Renewer.counter); assertEquals("most recently renewed token mismatch",Renewer.lastRenewed,token1); ts=new Credentials(); MyToken token4=dfs.getDelegationToken("user4"); Renewer.tokenToRenewIn2Sec=token4; 
LOG.info("token=" + token4 + " should be renewed for 2 secs"); String nn4=DelegationTokenRenewer.SCHEME + "://host4:0"; ts.addToken(new Text(nn4),token4); ApplicationId applicationId_1=BuilderUtils.newApplicationId(0,1); delegationTokenRenewer.addApplicationAsync(applicationId_1,ts,true); waitForEventsToGetProcessed(delegationTokenRenewer); delegationTokenRenewer.applicationFinished(applicationId_1); waitForEventsToGetProcessed(delegationTokenRenewer); numberOfExpectedRenewals=Renewer.counter; try { Thread.sleep(6 * 1000); } catch ( InterruptedException e) { } LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed); assertEquals("renew wasn't called as many times as expected",numberOfExpectedRenewals,Renewer.counter); try { token4.renew(conf); fail("Renewal of cancelled token should have failed"); } catch ( InvalidToken ite) { } }

    EqualityVerifier 
    // NOTE(review): because the app is registered with cancel=false, the final
    // token1.renew(conf) must succeed (the token was not cancelled at finish).
    /** * Basic idea of the test: * 1. register a token for 2 seconds with no cancel at the end * 2. cancel it immediately * 3. Sleep and check that the 2 seconds renew didn't happen * (totally 5 renewals) * 4. check cancellation * @throws IOException * @throws URISyntaxException */ @Test(timeout=60000) public void testDTRenewalWithNoCancel() throws Exception { MyFS dfs=(MyFS)FileSystem.get(conf); LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ conf.hashCode()); Credentials ts=new Credentials(); MyToken token1=dfs.getDelegationToken("user1"); Renewer.tokenToRenewIn2Sec=token1; LOG.info("token=" + token1 + " should be renewed for 2 secs"); String nn1=DelegationTokenRenewer.SCHEME + "://host1:0"; ts.addToken(new Text(nn1),token1); ApplicationId applicationId_1=BuilderUtils.newApplicationId(0,1); delegationTokenRenewer.addApplicationAsync(applicationId_1,ts,false); waitForEventsToGetProcessed(delegationTokenRenewer); delegationTokenRenewer.applicationFinished(applicationId_1); waitForEventsToGetProcessed(delegationTokenRenewer); int numberOfExpectedRenewals=Renewer.counter; try { Thread.sleep(6 * 1000); } catch ( InterruptedException e) { } LOG.info("Counter = " + Renewer.counter + ";t="+ Renewer.lastRenewed); assertEquals("renew wasn't called as many times as expected",numberOfExpectedRenewals,Renewer.counter); token1.renew(conf); }

    APIUtilityVerifier BranchVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    // NOTE(review): relies on real wall-clock sleeps (10s) within the 60s
    // timeout; the first renew succeeds inside the keep-alive window, the
    // second must fail with InvalidToken once the delayed removal has run.
    /** * Basic idea of the test: * 0. Setup token KEEP_ALIVE * 1. create tokens. * 2. register them for renewal - to be cancelled on app complete * 3. Complete app. * 4. Verify token is alive within the KEEP_ALIVE time * 5. Verify token has been cancelled after the KEEP_ALIVE_TIME * @throws IOException * @throws URISyntaxException */ @Test(timeout=60000) public void testDTKeepAlive1() throws Exception { Configuration lconf=new Configuration(conf); lconf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,true); lconf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,6000l); lconf.setLong(YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS,1000l); DelegationTokenRenewer localDtr=createNewDelegationTokenRenewer(lconf,counter); RMContext mockContext=mock(RMContext.class); ClientRMService mockClientRMService=mock(ClientRMService.class); when(mockContext.getClientRMService()).thenReturn(mockClientRMService); when(mockContext.getDelegationTokenRenewer()).thenReturn(localDtr); when(mockContext.getDispatcher()).thenReturn(dispatcher); InetSocketAddress sockAddr=InetSocketAddress.createUnresolved("localhost",1234); when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); localDtr.setRMContext(mockContext); localDtr.init(lconf); localDtr.start(); MyFS dfs=(MyFS)FileSystem.get(lconf); LOG.info("dfs=" + (Object)dfs.hashCode() + ";conf="+ lconf.hashCode()); Credentials ts=new Credentials(); MyToken token1=dfs.getDelegationToken("user1"); String nn1=DelegationTokenRenewer.SCHEME + "://host1:0"; ts.addToken(new Text(nn1),token1); ApplicationId applicationId_0=BuilderUtils.newApplicationId(0,0); localDtr.addApplicationAsync(applicationId_0,ts,true); waitForEventsToGetProcessed(localDtr); if (!eventQueue.isEmpty()) { Event evt=eventQueue.take(); if (evt instanceof RMAppEvent) { Assert.assertEquals(((RMAppEvent)evt).getType(),RMAppEventType.START); } else { fail("RMAppEvent.START was expected!!"); } } localDtr.applicationFinished(applicationId_0); 
waitForEventsToGetProcessed(localDtr); token1.renew(lconf); Thread.sleep(10000l); try { token1.renew(lconf); fail("Renewal of cancelled token should have failed"); } catch ( InvalidToken ite) { } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.security.TestRMDelegationTokens

    InternalCallVerifier EqualityVerifier 
    /**
     * Rolls the RM delegation-token master key, checking after each roll that
     * the current key is mirrored into the RM state store, then waits for the
     * issued delegation token to expire and asserts it is purged from the
     * store.
     */
    @Test(timeout=15000) public void testRMDTMasterKeyStateOnRollingMasterKey() throws Exception {
      MemoryRMStateStore memStore = new MemoryRMStateStore();
      memStore.init(conf);
      RMState rmState = memStore.getState();
      Map rmDTState = rmState.getRMDTSecretManagerState().getTokenState();
      Set rmDTMasterKeyState = rmState.getRMDTSecretManagerState().getMasterKeyState();
      MockRM rm1 = new MyMockRM(conf, memStore);
      rm1.start();
      RMDelegationTokenSecretManager dtSecretManager =
          rm1.getRMContext().getRMDelegationTokenSecretManager();
      // All keys generated at startup must already be persisted.
      Assert.assertEquals(dtSecretManager.getAllMasterKeys(), rmDTMasterKeyState);
      Set expiringKeys = new HashSet();
      expiringKeys.addAll(dtSecretManager.getAllMasterKeys());
      // Obtain a delegation token whose removal we can track in the store.
      GetDelegationTokenRequest request = mock(GetDelegationTokenRequest.class);
      when(request.getRenewer()).thenReturn("renewer1");
      GetDelegationTokenResponse response = rm1.getClientRMService().getDelegationToken(request);
      org.apache.hadoop.yarn.api.records.Token delegationToken = response.getRMDelegationToken();
      Token token1 = ConverterUtils.convertFromYarn(delegationToken, (Text) null);
      RMDelegationTokenIdentifier dtId1 = token1.decodeIdentifier();
      // Let the master key roll at least three times, verifying the current
      // key is present in the state store after each roll.
      while (((TestRMDelegationTokenSecretManager) dtSecretManager).numUpdatedKeys.get() < 3) {
        ((TestRMDelegationTokenSecretManager) dtSecretManager)
            .checkCurrentKeyInStateStore(rmDTMasterKeyState);
        Thread.sleep(100);
      }
      // Wait (up to ~10s) for the token to expire and be removed.
      int count = 0;
      while (rmDTState.containsKey(dtId1) && count < 100) {
        Thread.sleep(100);
        count++;
      }
      // Fix: the original test never checked the outcome of the wait loop, so
      // it passed even if the expired token was never purged from the store.
      Assert.assertFalse("expired delegation token was not removed from the RM state store",
          rmDTState.containsKey(dtId1));
      rm1.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Every master key captured at RM startup must eventually expire and be
     * dropped from the persisted master-key state. The polling loop spins
     * until all captured keys are gone; the @Test timeout (15s) bounds it.
     */
    @Test(timeout=15000) public void testRemoveExpiredMasterKeyInRMStateStore() throws Exception { MemoryRMStateStore memStore=new MemoryRMStateStore(); memStore.init(conf); RMState rmState=memStore.getState(); Set rmDTMasterKeyState=rmState.getRMDTSecretManagerState().getMasterKeyState(); MockRM rm1=new MyMockRM(conf,memStore); rm1.start(); RMDelegationTokenSecretManager dtSecretManager=rm1.getRMContext().getRMDelegationTokenSecretManager(); Assert.assertEquals(dtSecretManager.getAllMasterKeys(),rmDTMasterKeyState); Set expiringKeys=new HashSet(); expiringKeys.addAll(dtSecretManager.getAllMasterKeys()); while (true) { boolean allExpired=true; for ( DelegationKey key : expiringKeys) { if (rmDTMasterKeyState.contains(key)) { allExpired=false; } } if (allExpired) break; Thread.sleep(500); } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebApp

    InternalCallVerifier EqualityVerifier 
    /** Rendering the RM controller index page must set the page title to "Applications". */
    @Test public void testControllerIndex(){
      Injector injector = WebAppTests.createMockInjector(TestRMWebApp.class, this,
          new Module() {
            @Override
            public void configure(Binder binder) {
              binder.bind(ApplicationACLsManager.class)
                  .toInstance(new ApplicationACLsManager(new Configuration()));
            }
          });
      RmController controller = injector.getInstance(RmController.class);
      controller.index();
      assertEquals("Applications", controller.get(TITLE, "unknown"));
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServices

    InternalCallVerifier EqualityVerifier 
    /** The info endpoint must serve XML when application/xml is requested. */
    @Test public void testInfoXML() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("info").accept("application/xml").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, rsp.getType());
      verifyClusterInfoXML(rsp.getEntity(String.class));
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting text/plain from the cluster resource is unsupported: the
     * server answers 500 and no response body should have been captured.
     */
    @Test public void testInvalidAccept() throws JSONException, Exception {
      WebResource root = resource();
      String body = "";
      try {
        body = root.path("ws").path("v1").path("cluster")
            .accept(MediaType.TEXT_PLAIN).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse rsp = ue.getResponse();
        assertEquals(Status.INTERNAL_SERVER_ERROR, rsp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Without an Accept header the cluster resource defaults to JSON. */
    @Test public void testClusterDefault() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterInfo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** Without an Accept header the metrics endpoint defaults to JSON. */
    @Test public void testClusterMetricsDefault() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("metrics").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterMetricsJSON(rsp.getEntity(JSONObject.class));
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting the bare web-service root (no path) must answer 404 and no
     * response body should have been captured.
     */
    @Test public void testInvalidUri2() throws JSONException, Exception {
      WebResource root = resource();
      String body = "";
      try {
        body = root.accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse rsp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, rsp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch("error string exists and shouldn't", "", body);
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** The scheduler endpoint must serve the FIFO scheduler info as JSON. */
    @Test public void testClusterSchedulerFifo() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("scheduler").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterSchedulerFifo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** A trailing slash on the info path must behave like the bare path. */
    @Test public void testInfoSlash() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("info/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterInfo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** A trailing slash on the scheduler path must behave like the bare path. */
    @Test public void testClusterSchedulerFifoSlash() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("scheduler/").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterSchedulerFifo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** The cluster root must serve the cluster info as JSON when requested. */
    @Test public void testCluster() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterInfo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** Without an Accept header the info endpoint defaults to JSON. */
    @Test public void testInfoDefault() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("info").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterInfo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** A trailing slash on the cluster path must behave like the bare path. */
    @Test public void testClusterSlash() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterInfo(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** The metrics endpoint must serve cluster metrics as JSON when requested. */
    @Test public void testClusterMetrics() throws JSONException, Exception {
      ClientResponse rsp = resource().path("ws").path("v1").path("cluster")
          .path("metrics").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterMetricsJSON(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** GET /ws/v1/cluster/info with an explicit JSON Accept header returns cluster info. */
    @Test
    public void testInfo() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("info").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyClusterInfo(body);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET /ws/v1/cluster/scheduler with no Accept header: the service must
     * default to JSON and return the FIFO scheduler info.
     */
    @Test
    public void testClusterSchedulerFifoDefault() throws JSONException, Exception {
      WebResource web = resource();
      // No accept(...) — exercises the default media type.
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("scheduler").get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyClusterSchedulerFifo(body);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET /ws/v1/cluster/metrics/ (trailing slash) must behave like /metrics. */
    @Test
    public void testClusterMetricsSlash() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("metrics/").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyClusterMetricsJSON(body);
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * GET /ws/v1/cluster/metrics with an XML Accept header returns the cluster
     * metrics document as XML.
     */
    @Test
    public void testClusterMetricsXML() throws JSONException, Exception {
      WebResource r = resource();
      // Use the MediaType constant instead of the raw "application/xml" string,
      // for consistency with the sibling XML tests (e.g. testClusterSchedulerFifoXML).
      ClientResponse response = r.path("ws").path("v1").path("cluster")
          .path("metrics").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
      String xml = response.getEntity(String.class);
      verifyClusterMetricsXML(xml);
    }

    InternalCallVerifier EqualityVerifier 
    /** GET /ws/v1/cluster/scheduler with an XML Accept header returns FIFO scheduler info as XML. */
    @Test
    public void testClusterSchedulerFifoXML() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("scheduler").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String payload = resp.getEntity(String.class);
      verifySchedulerFifoXML(payload);
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting an unknown path under /ws/v1/cluster must yield 404 NOT_FOUND
     * and no response body must have been captured before the failure.
     */
    @Test
    public void testInvalidUri() throws JSONException, Exception {
      WebResource web = resource();
      String captured = "";
      try {
        captured = web.path("ws").path("v1").path("cluster").path("bogus")
            .accept(MediaType.APPLICATION_JSON).get(String.class);
        fail("should have thrown exception on invalid uri");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        WebServicesTestUtils.checkStringMatch(
            "error string exists and shouldn't", "", captured);
      }
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesApps

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Filtering /ws/v1/cluster/apps by the "states" query parameter: first a
     * single state (ACCEPTED), then two states (ACCEPTED and KILLED) supplied
     * as repeated parameters — both matching apps must come back.
     */
    @Test
    public void testAppsQueryStates() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      RMApp killedApp = rm.submitApp(CONTAINER_MB);
      rm.killApp(killedApp.getApplicationId());
      amNodeManager.nodeHeartbeat(true);

      // Single-state query: only the ACCEPTED app is returned.
      WebResource web = resource();
      MultivaluedMapImpl query = new MultivaluedMapImpl();
      query.add("states", YarnApplicationState.ACCEPTED.toString());
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParams(query).accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appList.length());
      assertEquals("state not equal to ACCEPTED", "ACCEPTED",
          appList.getJSONObject(0).getString("state"));

      // Two-state query: both the ACCEPTED and the KILLED app are returned.
      web = resource();
      query = new MultivaluedMapImpl();
      query.add("states", YarnApplicationState.ACCEPTED.toString());
      query.add("states", YarnApplicationState.KILLED.toString());
      resp = web.path("ws").path("v1").path("cluster").path("apps")
          .queryParams(query).accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 2, appList.length());
      String firstState = appList.getJSONObject(0).getString("state");
      String secondState = appList.getJSONObject(1).getString("state");
      assertTrue("both app states of ACCEPTED and KILLED are not present",
          (firstState.equals("ACCEPTED") && secondState.equals("KILLED"))
              || (firstState.equals("KILLED") && secondState.equals("ACCEPTED")));
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /** A "states" filter that matches no app (RUNNING) must return a null "apps" element. */
    @Test
    public void testAppsQueryStatesNone() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps")
          .queryParam("states", YarnApplicationState.RUNNING.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("apps is not null", JSONObject.NULL, body.get("apps"));
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * "startedTimeEnd" set to a timestamp taken before any app was submitted
     * must filter out every app, yielding a null "apps" element.
     */
    @Test
    public void testAppsQueryStartEnd() throws JSONException, Exception {
      rm.start();
      rm.registerNode("127.0.0.1:1234", 2048);
      long cutoff = System.currentTimeMillis();
      Thread.sleep(1); // ensure every submission lands strictly after the cutoff
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("startedTimeEnd", String.valueOf(cutoff))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("apps is not null", JSONObject.NULL, body.get("apps"));
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * "startedTimeBegin" set to a timestamp taken before the submissions must
     * keep all three apps in the result.
     */
    @Test
    public void testAppsQueryStartBegin() throws JSONException, Exception {
      rm.start();
      long cutoff = System.currentTimeMillis();
      Thread.sleep(1); // ensure submissions land strictly after the cutoff
      rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("startedTimeBegin", String.valueOf(cutoff))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 3, appList.length());
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /** With three apps submitted, "limit=2" must cap the result at two apps. */
    @Test
    public void testAppsQueryLimit() throws JSONException, Exception {
      rm.start();
      rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("limit", "2")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 2, appList.length());
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * GET /ws/v1/cluster/apps as XML with two submitted apps: the document must
     * contain one &lt;apps&gt; wrapper and two &lt;app&gt; elements.
     */
    @Test
    public void testAppsXMLMulti() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
      rm.submitApp(2048, "testwordcount2", "user1");
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xml = resp.getEntity(String.class);
      // Parse the raw XML payload into a DOM for element counting.
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList appsNodes = dom.getElementsByTagName("apps");
      assertEquals("incorrect number of elements", 1, appsNodes.getLength());
      NodeList appNodes = dom.getElementsByTagName("app");
      assertEquals("incorrect number of elements", 2, appNodes.getLength());
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting a well-formed but unknown application id must yield 404
     * NOT_FOUND carrying a three-field RemoteException JSON body.
     */
    @Test
    public void testNonexistApp() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .path("application_00000_0099").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid appid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject errBody = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = errBody.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String exMessage = remoteEx.getString("message");
        String exType = remoteEx.getString("exception");
        String exClass = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: app with id: application_00000_0099 not found",
            exMessage);
        WebServicesTestUtils.checkStringMatch("exception type",
            "NotFoundException", exType);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.NotFoundException", exClass);
      } finally {
        rm.stop();
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Filtering by "finalStatus=UNDEFINED" must return the single submitted
     * (still-running) app, whose fields are then verified against the RMApp.
     */
    @Test
    public void testAppsQueryFinalStatus() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      RMApp app1 = rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource r = resource();
      ClientResponse response = r.path("ws").path("v1").path("cluster")
          .path("apps")
          .queryParam("finalStatus", FinalApplicationStatus.UNDEFINED.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      // NOTE: removed a leftover System.out.println(json.toString()) debug line.
      JSONObject apps = json.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, apps.length());
      JSONArray array = apps.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, array.length());
      verifyAppInfo(array.getJSONObject(0), app1);
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * "finishedTimeBegin" set before the only finished app's completion must
     * return exactly that one app (the two still-running apps have no finish time).
     */
    @Test
    public void testAppsQueryFinishBegin() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      long cutoff = System.currentTimeMillis();
      Thread.sleep(1); // the finished app's completion time must exceed the cutoff
      RMApp app1 = rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      // Drive app1 through its attempt lifecycle so it finishes.
      MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
      am.registerAppAttempt();
      am.unregisterAppAttempt();
      amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
          1, ContainerState.COMPLETE);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("finishedTimeBegin", String.valueOf(cutoff))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appList.length());
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * An unparseable "finalStatus" value must yield 400 BAD_REQUEST with an
     * IllegalArgumentException described in the RemoteException body.
     */
    @Test
    public void testAppsQueryFinalStatusInvalid() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .queryParam("finalStatus", "INVALID_test")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception on invalid state query");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject errBody = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = errBody.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String exMessage = remoteEx.getString("message");
        String exType = remoteEx.getString("exception");
        String exClass = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringContains("exception message",
            "org.apache.hadoop.yarn.api.records.FinalApplicationStatus.INVALID_test",
            exMessage);
        WebServicesTestUtils.checkStringMatch("exception type",
            "IllegalArgumentException", exType);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "java.lang.IllegalArgumentException", exClass);
      } finally {
        rm.stop();
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * GET /ws/v1/cluster/apps/{appid} as XML: exactly one &lt;app&gt; element,
     * whose contents are verified against the submitted RMApp.
     */
    @Test
    public void testSingleAppsXML() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      RMApp app1 = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").path(app1.getApplicationId().toString())
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xml = resp.getEntity(String.class);
      // Parse the payload into a DOM and verify the single app element.
      DocumentBuilder builder =
          DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList appNodes = dom.getElementsByTagName("app");
      assertEquals("incorrect number of elements", 1, appNodes.getLength());
      verifyAppsXML(appNodes, app1);
      rm.stop();
    }

    IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises /ws/v1/cluster/appstatistics: unfiltered, filtered by state,
     * filtered by applicationType, combined filters, and the BAD_REQUEST error
     * responses for multiple applicationTypes and for an invalid state.
     *
     * Setup: three apps — one MAPREDUCE app driven to FINISHED, one MAPREDUCE
     * and one OTHER app left in ACCEPTED.
     */
    @Test
    public void testAppStatistics() throws JSONException, Exception {
      try {
        rm.start();
        MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 4096);
        Thread.sleep(1);
        RMApp app1 = rm.submitApp(CONTAINER_MB, "",
            UserGroupInformation.getCurrentUser().getShortUserName(), null,
            false, null, 2, null, "MAPREDUCE");
        amNodeManager.nodeHeartbeat(true);
        // Drive app1 through its attempt lifecycle so it reaches FINISHED.
        MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
        am.registerAppAttempt();
        am.unregisterAppAttempt();
        amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
            1, ContainerState.COMPLETE);
        rm.submitApp(CONTAINER_MB, "",
            UserGroupInformation.getCurrentUser().getShortUserName(), null,
            false, null, 2, null, "MAPREDUCE");
        rm.submitApp(CONTAINER_MB, "",
            UserGroupInformation.getCurrentUser().getShortUserName(), null,
            false, null, 2, null, "OTHER");

        // 1. No filters: one statItem per YarnApplicationState, type "*".
        WebResource r = resource();
        ClientResponse response = r.path("ws").path("v1").path("cluster")
            .path("appstatistics").accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject appsStatInfo = json.getJSONObject("appStatInfo");
        assertEquals("incorrect number of elements", 1, appsStatInfo.length());
        JSONArray statItems = appsStatInfo.getJSONArray("statItem");
        assertEquals("incorrect number of elements",
            YarnApplicationState.values().length, statItems.length());
        for (int i = 0; i < YarnApplicationState.values().length; ++i) {
          // BUGFIX: this loop previously inspected statItems.getJSONObject(0)
          // on every iteration, so only the first element was ever checked.
          // It must walk each element via index i.
          JSONObject statItem = statItems.getJSONObject(i);
          assertEquals("*", statItem.getString("type"));
          if (statItem.getString("state").equals("ACCEPTED")) {
            assertEquals("2", statItem.getString("count"));
          } else if (statItem.getString("state").equals("FINISHED")) {
            assertEquals("1", statItem.getString("count"));
          } else {
            assertEquals("0", statItem.getString("count"));
          }
        }

        // 2. states=ACCEPTED: a single statItem counting the two accepted apps.
        r = resource();
        response = r.path("ws").path("v1").path("cluster").path("appstatistics")
            .queryParam("states", YarnApplicationState.ACCEPTED.toString())
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        appsStatInfo = json.getJSONObject("appStatInfo");
        assertEquals("incorrect number of elements", 1, appsStatInfo.length());
        statItems = appsStatInfo.getJSONArray("statItem");
        assertEquals("incorrect number of elements", 1, statItems.length());
        assertEquals("ACCEPTED", statItems.getJSONObject(0).getString("state"));
        assertEquals("*", statItems.getJSONObject(0).getString("type"));
        assertEquals("2", statItems.getJSONObject(0).getString("count"));

        // 3. applicationTypes=MAPREDUCE: per-state counts for that type only.
        r = resource();
        response = r.path("ws").path("v1").path("cluster").path("appstatistics")
            .queryParam("applicationTypes", "MAPREDUCE")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        appsStatInfo = json.getJSONObject("appStatInfo");
        assertEquals("incorrect number of elements", 1, appsStatInfo.length());
        statItems = appsStatInfo.getJSONArray("statItem");
        assertEquals("incorrect number of elements",
            YarnApplicationState.values().length, statItems.length());
        for (int i = 0; i < YarnApplicationState.values().length; ++i) {
          // BUGFIX: same index-0 bug as above — use index i.
          JSONObject statItem = statItems.getJSONObject(i);
          assertEquals("mapreduce", statItem.getString("type"));
          if (statItem.getString("state").equals("ACCEPTED")) {
            assertEquals("1", statItem.getString("count"));
          } else if (statItem.getString("state").equals("FINISHED")) {
            assertEquals("1", statItem.getString("count"));
          } else {
            assertEquals("0", statItem.getString("count"));
          }
        }

        // 4. Two applicationTypes in one parameter: rejected with BAD_REQUEST.
        r = resource();
        response = r.path("ws").path("v1").path("cluster").path("appstatistics")
            .queryParam("applicationTypes", "MAPREDUCE,OTHER")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject exception = json.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        String message = exception.getString("message");
        String type = exception.getString("exception");
        String className = exception.getString("javaClassName");
        WebServicesTestUtils.checkStringContains("exception message",
            "we temporarily support at most one applicationType", message);
        WebServicesTestUtils.checkStringEqual("exception type",
            "BadRequestException", type);
        WebServicesTestUtils.checkStringEqual("exception className",
            "org.apache.hadoop.yarn.webapp.BadRequestException", className);

        // 5. states=FINISHED,ACCEPTED combined with applicationTypes=MAPREDUCE:
        //    two statItems (order unspecified), one app counted in each.
        r = resource();
        response = r.path("ws").path("v1").path("cluster").path("appstatistics")
            .queryParam("states", YarnApplicationState.FINISHED.toString()
                + "," + YarnApplicationState.ACCEPTED.toString())
            .queryParam("applicationTypes", "MAPREDUCE")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        appsStatInfo = json.getJSONObject("appStatInfo");
        assertEquals("incorrect number of elements", 1, appsStatInfo.length());
        statItems = appsStatInfo.getJSONArray("statItem");
        assertEquals("incorrect number of elements", 2, statItems.length());
        JSONObject statItem1 = statItems.getJSONObject(0);
        JSONObject statItem2 = statItems.getJSONObject(1);
        assertTrue((statItem1.getString("state").equals("ACCEPTED")
            && statItem2.getString("state").equals("FINISHED"))
            || (statItem2.getString("state").equals("ACCEPTED")
                && statItem1.getString("state").equals("FINISHED")));
        assertEquals("mapreduce", statItem1.getString("type"));
        assertEquals("1", statItem1.getString("count"));
        assertEquals("mapreduce", statItem2.getString("type"));
        assertEquals("1", statItem2.getString("count"));

        // 6. Unknown state value: rejected with BAD_REQUEST.
        r = resource();
        response = r.path("ws").path("v1").path("cluster").path("appstatistics")
            .queryParam("states", "wrong_state")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        exception = json.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, exception.length());
        message = exception.getString("message");
        type = exception.getString("exception");
        className = exception.getString("javaClassName");
        WebServicesTestUtils.checkStringContains("exception message",
            "Invalid application-state wrong_state", message);
        WebServicesTestUtils.checkStringEqual("exception type",
            "BadRequestException", type);
        WebServicesTestUtils.checkStringEqual("exception className",
            "org.apache.hadoop.yarn.webapp.BadRequestException", className);
      } finally {
        rm.stop();
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by "queue=default" must return both apps submitted to the default queue. */
    @Test
    public void testAppsQueryQueue() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("queue", "default")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 2, appList.length());
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /** A singular "state" filter that matches no app (RUNNING) must return a null "apps" element. */
    @Test
    public void testAppsQueryStateNone() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps")
          .queryParam("state", YarnApplicationState.RUNNING.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("apps is not null", JSONObject.NULL, body.get("apps"));
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A malformed application id ("application_invalid_12") must yield 400
     * BAD_REQUEST describing the underlying NumberFormatException.
     */
    @Test
    public void testInvalidApp() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .path("application_invalid_12").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid appid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject errBody = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = errBody.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String exMessage = remoteEx.getString("message");
        String exType = remoteEx.getString("exception");
        String exClass = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringMatch("exception message",
            "For input string: \"invalid\"", exMessage);
        WebServicesTestUtils.checkStringMatch("exception type",
            "NumberFormatException", exType);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "java.lang.NumberFormatException", exClass);
      } finally {
        rm.stop();
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * "finishedTimeEnd" taken after all activity must keep every app in the
     * result — apps without a finish time are not excluded by this bound.
     */
    @Test
    public void testAppsQueryFinishEnd() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      RMApp app1 = rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      // Drive app1 through its attempt lifecycle so it finishes.
      MockAM am = rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
      am.registerAppAttempt();
      am.unregisterAppAttempt();
      amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
          1, ContainerState.COMPLETE);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      long cutoff = System.currentTimeMillis();
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("finishedTimeEnd", String.valueOf(cutoff))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 3, appList.length());
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by the submitting user must return both apps submitted by the current user. */
    @Test
    public void testAppsQueryUser() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      String currentUser =
          UserGroupInformation.getCurrentUser().getShortUserName();
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps").queryParam("user", currentUser)
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 2, appList.length());
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Combining "startedTimeBegin" and "startedTimeEnd" must select only the
     * two apps submitted inside the [begin, end] window, excluding the third
     * submitted after the end timestamp.
     */
    @Test
    public void testAppsQueryStartBeginEnd() throws JSONException, Exception {
      rm.start();
      rm.registerNode("127.0.0.1:1234", 2048);
      long windowStart = System.currentTimeMillis();
      Thread.sleep(1);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      long windowEnd = System.currentTimeMillis();
      Thread.sleep(1);
      rm.submitApp(CONTAINER_MB); // outside the window
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("apps")
          .queryParam("startedTimeBegin", String.valueOf(windowStart))
          .queryParam("startedTimeEnd", String.valueOf(windowEnd))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsObj = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsObj.length());
      JSONArray appList = appsObj.getJSONArray("app");
      assertEquals("incorrect number of elements", 2, appList.length());
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * An unknown value in the "states" filter must yield 400 BAD_REQUEST with
     * a BadRequestException described in the RemoteException body.
     */
    @Test
    public void testAppsQueryStatesInvalid() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .queryParam("states", "INVALID_test")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception on invalid state query");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject errBody = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = errBody.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String exMessage = remoteEx.getString("message");
        String exType = remoteEx.getString("exception");
        String exClass = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringContains("exception message",
            "Invalid application-state INVALID_test", exMessage);
        WebServicesTestUtils.checkStringMatch("exception type",
            "BadRequestException", exType);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.BadRequestException", exClass);
      } finally {
        rm.stop();
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A malformed application id must yield 400 BAD_REQUEST describing the
     * underlying NumberFormatException raised while parsing the id.
     */
    @Test
    public void testInvalidAppAttempts() throws JSONException, Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      amNodeManager.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .path("application_invalid_12").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on invalid appid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject errBody = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = errBody.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String exMessage = remoteEx.getString("message");
        String exType = remoteEx.getString("exception");
        String exClass = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringMatch("exception message",
            "For input string: \"invalid\"", exMessage);
        WebServicesTestUtils.checkStringMatch("exception type",
            "NumberFormatException", exType);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "java.lang.NumberFormatException", exClass);
      } finally {
        rm.stop();
      }
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Exercises the "applicationTypes" filter on the apps collection.
     * Three apps are submitted (default YARN, MAPREDUCE, NON-YARN) and the
     * filter is probed with single values, repeated query parameters,
     * comma-separated lists, empty values, and lists padded with blanks.
     */
    @Test public void testAppsQueryAppTypes() throws JSONException, Exception { rm.start(); MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048); Thread.sleep(1); RMApp app1=rm.submitApp(CONTAINER_MB); amNodeManager.nodeHeartbeat(true); MockAM am=rm.sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId()); am.registerAppAttempt(); am.unregisterAppAttempt(); amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),1,ContainerState.COMPLETE); rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"MAPREDUCE"); rm.submitApp(CONTAINER_MB,"",UserGroupInformation.getCurrentUser().getShortUserName(),null,false,null,2,null,"NON-YARN"); WebResource r=resource();
    // Single type: applicationTypes=MAPREDUCE matches exactly one app.
    ClientResponse response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); JSONObject json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); JSONObject apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); JSONArray array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",1,array.length()); assertEquals("MAPREDUCE",array.getJSONObject(0).getString("applicationType"));
    // Repeated parameter: YARN + MAPREDUCE matches two apps (order unspecified).
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",2,array.length()); assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE")));
    // Comma-separated list: "YARN,NON-YARN" matches two apps.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",2,array.length()); assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
    // Empty value: no filtering, all three apps come back.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",3,array.length());
    // List plus repeated parameter covering all types: all three apps.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN,NON-YARN").queryParam("applicationTypes","MAPREDUCE").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",3,array.length());
    // A concrete value plus an empty one: only the concrete value filters.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN").queryParam("applicationTypes","").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",1,array.length()); assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
    // Blanks and empty tokens around a value are trimmed away.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, YARN ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",1,array.length()); assertEquals("YARN",array.getJSONObject(0).getString("applicationType"));
    // A list of only blanks/empties is equivalent to no filter.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes",",,, ,, ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",3,array.length());
    // Mixed list with embedded blanks: YARN and NON-YARN both match.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes","YARN, ,NON-YARN, ,,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",2,array.length()); assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("NON-YARN")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("NON-YARN")));
    // Two padded parameters: YARN and MAPREDUCE both match.
    r=resource(); response=r.path("ws").path("v1").path("cluster").path("apps").queryParam("applicationTypes"," YARN, , ,,,").queryParam("applicationTypes","MAPREDUCE , ,, ,").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); json=response.getEntity(JSONObject.class); assertEquals("incorrect number of elements",1,json.length()); apps=json.getJSONObject("apps"); assertEquals("incorrect number of elements",1,apps.length()); array=apps.getJSONArray("app"); assertEquals("incorrect number of elements",2,array.length()); assertTrue((array.getJSONObject(0).getString("applicationType").equals("YARN") && array.getJSONObject(1).getString("applicationType").equals("MAPREDUCE")) || (array.getJSONObject(1).getString("applicationType").equals("YARN") && array.getJSONObject(0).getString("applicationType").equals("MAPREDUCE"))); rm.stop(); }

    InternalCallVerifier EqualityVerifier 
    /** Filtering by state=ACCEPTED should return exactly the one submitted app. */
    @Test
    public void testAppsQueryState() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      RMApp submitted = rm.submitApp(CONTAINER_MB);
      nm.nodeHeartbeat(true);
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .queryParam("state", YarnApplicationState.ACCEPTED.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      JSONObject body = rsp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsNode = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsNode.length());
      JSONArray appArray = appsNode.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appArray.length());
      verifyAppInfo(appArray.getJSONObject(0), submitted);
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * A finished-time window [start, end] should capture only the single app
     * that actually completed inside it; the two still-running apps must not.
     */
    @Test
    public void testAppsQueryFinishBeginEnd() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      long windowStart = System.currentTimeMillis();
      Thread.sleep(1);
      RMApp finished = rm.submitApp(CONTAINER_MB);
      nm.nodeHeartbeat(true);
      // Drive the first app through AM registration and completion.
      MockAM am = rm.sendAMLaunched(finished.getCurrentAppAttempt().getAppAttemptId());
      am.registerAppAttempt();
      am.unregisterAppAttempt();
      nm.nodeHeartbeat(finished.getCurrentAppAttempt().getAppAttemptId(), 1, ContainerState.COMPLETE);
      // These two never finish, so they fall outside the window.
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      long windowEnd = System.currentTimeMillis();
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .queryParam("finishedTimeBegin", String.valueOf(windowStart))
          .queryParam("finishedTimeEnd", String.valueOf(windowEnd))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      JSONObject body = rsp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsNode = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsNode.length());
      JSONArray appArray = appsNode.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appArray.length());
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Looking up a well-formed but unknown application id must 404 with a
     * NotFoundException payload naming the missing id.
     */
    @Test
    public void testNonexistAppAttempts() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
      nm.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .path("application_00000_0099")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception on invalid appid");
      } catch (UniformInterfaceException ue) {
        ClientResponse rsp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, rsp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
        JSONObject remote = rsp.getEntity(JSONObject.class).getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remote.length());
        WebServicesTestUtils.checkStringMatch("exception message",
            "java.lang.Exception: app with id: application_00000_0099 not found",
            remote.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "NotFoundException", remote.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.NotFoundException", remote.getString("javaClassName"));
      } finally {
        rm.stop();
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** XML rendering of the appattempts collection for a freshly submitted app. */
    @Test
    public void testAppAttemptsXML() throws JSONException, Exception {
      rm.start();
      String user = "user1";
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      RMApp submitted = rm.submitApp(CONTAINER_MB, "testwordcount", user);
      nm.nodeHeartbeat(true);
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .path(submitted.getApplicationId().toString()).path("appattempts")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, rsp.getType());
      String xml = rsp.getEntity(String.class);
      DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList attemptsNodes = dom.getElementsByTagName("appAttempts");
      assertEquals("incorrect number of elements", 1, attemptsNodes.getLength());
      NodeList attemptNodes = dom.getElementsByTagName("appAttempt");
      assertEquals("incorrect number of elements", 1, attemptNodes.getLength());
      verifyAppAttemptsXML(attemptNodes, submitted.getCurrentAppAttempt(), user);
      rm.stop();
    }

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Fails the AM container repeatedly until the configured max attempt count
     * is hit, then checks that the app reports every attempt via web services.
     */
    @Test(timeout=20000)
    public void testMultipleAppAttempts() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 8192);
      RMApp app = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
      MockAM am = MockRM.launchAndRegisterAM(app, rm, nm);
      int maxAttempts = rm.getConfig().getInt(
          YarnConfiguration.RM_AM_MAX_ATTEMPTS,
          YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
      assertTrue(maxAttempts > 1);
      int attempt = 1;
      while (true) {
        // Kill the current AM container and wait for the attempt to fail.
        nm.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
        am.waitForState(RMAppAttemptState.FAILED);
        if (attempt == maxAttempts) {
          // Last allowed attempt just failed: the whole app must fail.
          rm.waitForState(app.getApplicationId(), RMAppState.FAILED);
          break;
        }
        // Otherwise the RM retries: wait for re-acceptance and relaunch the AM.
        rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
        am = MockRM.launchAndRegisterAM(app, rm, nm);
        attempt++;
      }
      assertEquals("incorrect number of attempts", maxAttempts,
          app.getAppAttempts().values().size());
      testAppAttemptsHelper(app.getApplicationId().toString(), app,
          MediaType.APPLICATION_JSON);
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Querying with an unparseable singular "state" value must yield 400
     * Bad Request carrying a BadRequestException payload.
     */
    @Test
    public void testAppsQueryStateInvalid() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      nm.nodeHeartbeat(true);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("apps")
            .queryParam("state", "INVALID_test")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception on invalid state query");
      } catch (UniformInterfaceException ue) {
        ClientResponse rsp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, rsp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
        JSONObject remote = rsp.getEntity(JSONObject.class).getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remote.length());
        WebServicesTestUtils.checkStringContains("exception message",
            "Invalid application-state INVALID_test", remote.getString("message"));
        WebServicesTestUtils.checkStringMatch("exception type",
            "BadRequestException", remote.getString("exception"));
        WebServicesTestUtils.checkStringMatch("exception classname",
            "org.apache.hadoop.yarn.webapp.BadRequestException", remote.getString("javaClassName"));
      } finally {
        rm.stop();
      }
    }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * The "states" parameter accepts a comma-separated list: a single state
     * filters down to it, and ACCEPTED,KILLED returns one app in each state.
     */
    @Test
    public void testAppsQueryStatesComma() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      RMApp killedApp = rm.submitApp(CONTAINER_MB);
      rm.killApp(killedApp.getApplicationId());
      nm.nodeHeartbeat(true);
      // Single state: only the ACCEPTED app shows up.
      MultivaluedMapImpl query = new MultivaluedMapImpl();
      query.add("states", YarnApplicationState.ACCEPTED.toString());
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .queryParams(query).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      JSONObject body = rsp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsNode = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsNode.length());
      JSONArray appArray = appsNode.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appArray.length());
      assertEquals("state not equal to ACCEPTED", "ACCEPTED",
          appArray.getJSONObject(0).getString("state"));
      // Comma-separated pair: both apps are returned, one per state, in any order.
      query = new MultivaluedMapImpl();
      query.add("states", YarnApplicationState.ACCEPTED.toString() + ","
          + YarnApplicationState.KILLED.toString());
      rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .queryParams(query).accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      body = rsp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      appsNode = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsNode.length());
      appArray = appsNode.getJSONArray("app");
      assertEquals("incorrect number of elements", 2, appArray.length());
      String firstState = appArray.getJSONObject(0).getString("state");
      String secondState = appArray.getJSONObject(1).getString("state");
      assertTrue("both app states of ACCEPTED and KILLED are not present",
          (firstState.equals("ACCEPTED") && secondState.equals("KILLED"))
          || (firstState.equals("KILLED") && secondState.equals("ACCEPTED")));
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /** No app has finalStatus KILLED, so the "apps" element must be JSON null. */
    @Test
    public void testAppsQueryFinalStatusNone() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      nm.nodeHeartbeat(true);
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .queryParam("finalStatus", FinalApplicationStatus.KILLED.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      JSONObject body = rsp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("apps is not null", JSONObject.NULL, body.get("apps"));
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Apps collection rendered as XML contains one apps wrapper and one app element. */
    @Test
    public void testAppsXML() throws JSONException, Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      RMApp submitted = rm.submitApp(CONTAINER_MB, "testwordcount", "user1");
      nm.nodeHeartbeat(true);
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, rsp.getType());
      String xml = rsp.getEntity(String.class);
      DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList nodesApps = dom.getElementsByTagName("apps");
      assertEquals("incorrect number of elements", 1, nodesApps.getLength());
      NodeList nodes = dom.getElementsByTagName("app");
      assertEquals("incorrect number of elements", 1, nodes.getLength());
      verifyAppsXML(nodes, submitted);
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** startedTimeBegin filter: only the app submitted after the cutoff is returned. */
    @Test
    public void testAppsQueryStartBeginSome() throws JSONException, Exception {
      rm.start();
      rm.registerNode("127.0.0.1:1234", 2048);
      rm.submitApp(CONTAINER_MB);
      rm.submitApp(CONTAINER_MB);
      long cutoff = System.currentTimeMillis();
      Thread.sleep(1);
      // Only this third app starts after the cutoff.
      rm.submitApp(CONTAINER_MB);
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("apps")
          .queryParam("startedTimeBegin", String.valueOf(cutoff))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      JSONObject body = rsp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject appsNode = body.getJSONObject("apps");
      assertEquals("incorrect number of elements", 1, appsNode.length());
      JSONArray appArray = appsNode.getJSONArray("app");
      assertEquals("incorrect number of elements", 1, appArray.length());
      rm.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesAppsModification

    APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * PUTting an app-state transition to anything other than KILLED — a valid
     * but non-kill state (FINISHED) or garbage ("blah") — must be rejected
     * with 400 Bad Request, or 401 when authentication is disabled.
     * (Also drops the redundant trailing {@code return;} of a void method.)
     */
    @Test
    public void testSingleAppKillInvalidState() throws Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
      MediaType[] contentTypes = {MediaType.APPLICATION_JSON_TYPE, MediaType.APPLICATION_XML_TYPE};
      // FINISHED is a real state but not a legal kill target; "blah" is not a state at all.
      String[] targetStates = {YarnApplicationState.FINISHED.toString(), "blah"};
      for (String mediaType : mediaTypes) {
        for (MediaType contentType : contentTypes) {
          for (String targetStateString : targetStates) {
            RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
            amNodeManager.nodeHeartbeat(true);
            AppState targetState = new AppState(targetStateString);
            // JSON bodies must be serialized by hand; XML marshalling is automatic.
            Object entity;
            if (contentType == MediaType.APPLICATION_JSON_TYPE) {
              entity = appStateToJSON(targetState);
            } else {
              entity = targetState;
            }
            ClientResponse response = this
                .constructWebResource("apps", app.getApplicationId().toString(), "state")
                .entity(entity, contentType).accept(mediaType).put(ClientResponse.class);
            if (!isAuthenticationEnabled()) {
              assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
              continue;
            }
            assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
          }
        }
      }
      rm.stop();
    }

    APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Drives a full kill through the REST API for each media/content type:
     * PUT {state: KILLED} must return 202 Accepted while the kill is in
     * flight, the Location header must point back at the app's state
     * resource, and repeated PUTs must eventually return 200 OK once the
     * app reaches KILLED.
     */
    @Test(timeout=90000) public void testSingleAppKill() throws Exception { rm.start(); MockNM amNodeManager=rm.registerNode("127.0.0.1:1234",2048); String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML}; MediaType[] contentTypes={MediaType.APPLICATION_JSON_TYPE,MediaType.APPLICATION_XML_TYPE}; for ( String mediaType : mediaTypes) { for ( MediaType contentType : contentTypes) { RMApp app=rm.submitApp(CONTAINER_MB,"",webserviceUserName); amNodeManager.nodeHeartbeat(true); ClientResponse response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).get(ClientResponse.class); AppState targetState=new AppState(YarnApplicationState.KILLED.toString()); Object entity; if (contentType == MediaType.APPLICATION_JSON_TYPE) { entity=appStateToJSON(targetState); } else { entity=targetState; }
    // First PUT: expect 202 Accepted (or 401 when auth is off) with a KILLING/ACCEPTED body.
    response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").entity(entity,contentType).accept(mediaType).put(ClientResponse.class); if (!isAuthenticationEnabled()) { assertEquals(Status.UNAUTHORIZED,response.getClientResponseStatus()); continue; } assertEquals(Status.ACCEPTED,response.getClientResponseStatus()); if (mediaType == MediaType.APPLICATION_JSON) { verifyAppStateJson(response,RMAppState.KILLING,RMAppState.ACCEPTED); } else { verifyAppStateXML(response,RMAppState.KILLING,RMAppState.ACCEPTED); }
    // The Location header must be a fetchable URL naming this app's state resource.
    String locationHeaderValue=response.getHeaders().getFirst(HttpHeaders.LOCATION); Client c=Client.create(); WebResource tmp=c.resource(locationHeaderValue); if (isAuthenticationEnabled()) { tmp=tmp.queryParam("user.name",webserviceUserName); } response=tmp.get(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); assertTrue(locationHeaderValue.endsWith("/ws/v1/cluster/apps/" + app.getApplicationId().toString() + "/state")); while (true) { Thread.sleep(100);
    // Poll: re-PUT until the server answers 200 OK, then the app must be KILLED.
    response=this.constructWebResource("apps",app.getApplicationId().toString(),"state").accept(mediaType).entity(entity,contentType).put(ClientResponse.class); assertTrue((response.getClientResponseStatus() == Status.ACCEPTED) || (response.getClientResponseStatus() == Status.OK)); if (response.getClientResponseStatus() == Status.OK) { assertEquals(RMAppState.KILLED,app.getState()); if (mediaType == MediaType.APPLICATION_JSON) { verifyAppStateJson(response,RMAppState.KILLED); } else { verifyAppStateXML(response,RMAppState.KILLED); } break; } } } } rm.stop(); return; }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** GET of an app's state resource returns 200 with ACCEPTED, in JSON and XML. */
    @Test
    public void testSingleAppState() throws Exception {
      rm.start();
      MockNM nm = rm.registerNode("127.0.0.1:1234", 2048);
      String[] mediaTypes = {MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML};
      for (String mediaType : mediaTypes) {
        RMApp app = rm.submitApp(CONTAINER_MB, "", webserviceUserName);
        nm.nodeHeartbeat(true);
        ClientResponse response = this
            .constructWebResource("apps", app.getApplicationId().toString(), "state")
            .accept(mediaType).get(ClientResponse.class);
        assertEquals(Status.OK, response.getClientResponseStatus());
        if (mediaType == MediaType.APPLICATION_JSON) {
          verifyAppStateJson(response, RMAppState.ACCEPTED);
        } else if (mediaType == MediaType.APPLICATION_XML) {
          verifyAppStateXML(response, RMAppState.ACCEPTED);
        }
      }
      rm.stop();
    }

    APIUtilityVerifier BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Kill requests against app ids that do not exist — whether well-formed
     * or random garbage — must 404, or 401 when authentication is disabled.
     * (Also drops the redundant trailing {@code return;} of a void method.)
     */
    @Test
    public void testSingleAppKillInvalidId() throws Exception {
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      amNodeManager.nodeHeartbeat(true);
      String[] testAppIds = {"application_1391705042196_0001", "random_string"};
      for (String testAppId : testAppIds) {
        AppState info = new AppState("KILLED");
        ClientResponse response = this.constructWebResource("apps", testAppId, "state")
            .accept(MediaType.APPLICATION_XML)
            .entity(info, MediaType.APPLICATION_XML).put(ClientResponse.class);
        if (!isAuthenticationEnabled()) {
          assertEquals(Status.UNAUTHORIZED, response.getClientResponseStatus());
          continue;
        }
        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
      }
      rm.stop();
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Malformed request bodies to the app-submission endpoint — an empty XML
     * payload and a JSON document that does not match the schema — must be
     * rejected with 400 Bad Request.
     * (Fix: the local-resources map was a raw {@code HashMap}; it is now
     * parameterized to match the declared value type.)
     */
    @Test
    public void testAppSubmitBadJsonAndXML() throws Exception {
      String urlPath = "apps";
      rm.start();
      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048);
      amNodeManager.nodeHeartbeat(true);
      // Build a fully-populated, valid context for contrast with the bad bodies below.
      ApplicationSubmissionContextInfo appInfo = new ApplicationSubmissionContextInfo();
      appInfo.setApplicationName("test");
      appInfo.setPriority(3);
      appInfo.setMaxAppAttempts(2);
      appInfo.setQueue("testqueue");
      appInfo.setApplicationType("test-type");
      HashMap<String, LocalResourceInfo> lr = new HashMap<String, LocalResourceInfo>();
      LocalResourceInfo y = new LocalResourceInfo();
      y.setUrl(new URI("http://www.test.com/file.txt"));
      y.setSize(100);
      y.setTimestamp(System.currentTimeMillis());
      y.setType(LocalResourceType.FILE);
      y.setVisibility(LocalResourceVisibility.APPLICATION);
      lr.put("example", y);
      appInfo.getContainerLaunchContextInfo().setResources(lr);
      appInfo.getResource().setMemory(1024);
      appInfo.getResource().setvCores(1);
      // Empty body posted as XML must be rejected.
      String body = "";
      ClientResponse response = this.constructWebResource(urlPath)
          .accept(MediaType.APPLICATION_XML)
          .entity(body, MediaType.APPLICATION_XML).post(ClientResponse.class);
      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
      // Schema-less JSON body must be rejected too.
      body = "{\"a\" : \"b\"}";
      response = this.constructWebResource(urlPath)
          .accept(MediaType.APPLICATION_XML)
          .entity(body, MediaType.APPLICATION_JSON).post(ClientResponse.class);
      validateResponseStatus(response, Status.BAD_REQUEST);
      rm.stop();
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesCapacitySched

    APIUtilityVerifier IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * After two users submit apps to queue b1, the scheduler JSON must expose
     * a per-user entry for each, with app counts and resourcesUsed populated.
     */
    @Test
    public void testPerUserResourcesJSON() throws Exception {
      rm.start();
      try {
        rm.submitApp(10, "app1", "user1", null, "b1");
        rm.submitApp(20, "app2", "user2", null, "b1");
        ClientResponse rsp = resource()
            .path("ws").path("v1").path("cluster").path("scheduler/")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
        JSONObject schedulerInfo = rsp.getEntity(JSONObject.class)
            .getJSONObject("scheduler").getJSONObject("schedulerInfo");
        JSONObject b1 = getSubQueue(getSubQueue(schedulerInfo, "b"), "b1");
        JSONArray users = b1.getJSONObject("users").getJSONArray("user");
        for (int i = 0; i < 2; ++i) {
          JSONObject user = users.getJSONObject(i);
          String username = user.getString("username");
          assertTrue("User isn't user1 or user2",
              username.equals("user1") || username.equals("user2"));
          // Fields must exist and be numeric; values themselves are not pinned here.
          user.getInt("numActiveApplications");
          user.getInt("numPendingApplications");
          checkResourcesUsed(user);
        }
      } finally {
        rm.stop();
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** The scheduler endpoint with a trailing slash still serves valid JSON. */
    @Test
    public void testClusterSchedulerSlash() throws JSONException, Exception {
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("scheduler/")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterScheduler(rsp.getEntity(JSONObject.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** The scheduler endpoint (no trailing slash) serves valid JSON. */
    @Test
    public void testClusterScheduler() throws JSONException, Exception {
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("scheduler")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterScheduler(rsp.getEntity(JSONObject.class));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Scheduler endpoint as XML: one scheduler element wrapping one schedulerInfo. */
    @Test
    public void testClusterSchedulerXML() throws JSONException, Exception {
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("scheduler/")
          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, rsp.getType());
      String xml = rsp.getEntity(String.class);
      DocumentBuilder builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
      InputSource source = new InputSource();
      source.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(source);
      NodeList scheduler = dom.getElementsByTagName("scheduler");
      assertEquals("incorrect number of elements", 1, scheduler.getLength());
      NodeList schedulerInfo = dom.getElementsByTagName("schedulerInfo");
      assertEquals("incorrect number of elements", 1, schedulerInfo.getLength());
      verifyClusterSchedulerXML(schedulerInfo);
    }

    EqualityVerifier 
    /**
     * Resource#toString must render the memory and vcore values. The previous
     * expectation of an empty string could never match a 10MB/1-core resource;
     * YARN's Resource renders as {@code <memory:M, vCores:V>}.
     */
    @Test
    public void testResourceInfo() {
      Resource res = Resources.createResource(10, 1);
      assertEquals("<memory:10, vCores:1>", res.toString());
    }

    InternalCallVerifier EqualityVerifier 
    /** With no Accept header, the scheduler endpoint defaults to JSON. */
    @Test
    public void testClusterSchedulerDefault() throws JSONException, Exception {
      ClientResponse rsp = resource()
          .path("ws").path("v1").path("cluster").path("scheduler")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, rsp.getType());
      verifyClusterScheduler(rsp.getEntity(JSONObject.class));
    }

    APIUtilityVerifier IterativeVerifier BranchVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies per-user resource reporting in the scheduler web-services XML:
     * queue b1 (which received apps from user1 and user2) must list exactly
     * those two users with parseable resourcesUsed and app-count children,
     * every other queue must expose an empty users element, and every
     * resourcesUsed element in the document must carry numeric memory and
     * vCores values.
     * @throws Exception on any request or parse failure
     */
    @Test public void testPerUserResourcesXML() throws Exception { rm.start(); try { rm.submitApp(10,"app1","user1",null,"b1"); rm.submitApp(20,"app2","user2",null,"b1"); WebResource r=resource(); ClientResponse response=r.path("ws/v1/cluster/scheduler").accept(MediaType.APPLICATION_XML).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_XML_TYPE,response.getType()); String xml=response.getEntity(String.class); DocumentBuilder db=DocumentBuilderFactory.newInstance().newDocumentBuilder(); InputSource is=new InputSource(); is.setCharacterStream(new StringReader(xml)); Document dom=db.parse(is);
    // Walk every <users> element; only queue b1 should have user children.
    NodeList allUsers=dom.getElementsByTagName("users"); for (int i=0; i < allUsers.getLength(); ++i) { Node perUserResources=allUsers.item(i); String queueName=getChildNodeByName(perUserResources.getParentNode(),"queueName").getTextContent(); if (queueName.equals("b1")) { assertEquals(2,perUserResources.getChildNodes().getLength()); NodeList users=perUserResources.getChildNodes(); for (int j=0; j < users.getLength(); ++j) { Node user=users.item(j); String username=getChildNodeByName(user,"username").getTextContent(); assertTrue(username.equals("user1") || username.equals("user2")); Integer.parseInt(getChildNodeByName(getChildNodeByName(user,"resourcesUsed"),"memory").getTextContent()); Integer.parseInt(getChildNodeByName(user,"numActiveApplications").getTextContent()); Integer.parseInt(getChildNodeByName(user,"numPendingApplications").getTextContent()); } } else { assertEquals(0,perUserResources.getChildNodes().getLength()); } }
    // Every resourcesUsed element must have numeric memory and vCores children.
    NodeList allResourcesUsed=dom.getElementsByTagName("resourcesUsed"); for (int i=0; i < allResourcesUsed.getLength(); ++i) { Node resourcesUsed=allResourcesUsed.item(i); Integer.parseInt(getChildNodeByName(resourcesUsed,"memory").getTextContent()); Integer.parseInt(getChildNodeByName(resourcesUsed,"vCores").getTextContent()); } } finally { rm.stop(); } }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesDelegationTokenAuthentication

    APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that a delegation token that has been cancelled can no longer be
     * used to authenticate: submitting an app with the cancelled token in the
     * auth header must be rejected with HTTP 403 (FORBIDDEN).
     */
    @Test
    public void testCancelledDelegationToken() throws Exception {
      String token = getDelegationToken("client");
      cancelDelegationToken(token);
      ApplicationSubmissionContextInfo app = new ApplicationSubmissionContextInfo();
      String appid = "application_123_0";
      app.setApplicationId(appid);
      String requestBody = getMarshalledAppInfo(app);
      URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestProperty(DelegationTokenHeader, token);
      setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);
      try {
        conn.getInputStream();
        // Fixed message: this test exercises *cancelled* tokens, not expired ones.
        fail("Authentication should fail with cancelled delegation tokens");
      } catch (IOException e) {
        assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
      }
    }

    APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * Creating, renewing and cancelling delegation tokens must all be rejected
     * with HTTP 403 when the caller authenticates with a delegation token.
     */
    @Test
    public void testDelegationTokenOps() throws Exception {
      String token = getDelegationToken("client");
      String createRequest = "{\"renewer\":\"test\"}";
      String renewRequest = "{\"token\": \"" + token + "\"}";
      String[] requests = {createRequest, renewRequest};
      // Create and renew are both POSTs to the same endpoint.
      for (String requestBody : requests) {
        URL url = new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty(DelegationTokenHeader, token);
        setupConn(conn, "POST", MediaType.APPLICATION_JSON, requestBody);
        try {
          conn.getInputStream();
          fail("Creation/Renewing delegation tokens should not be "
              + "allowed with token auth");
        } catch (IOException e) {
          assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
        }
      }
      // Cancellation goes through DELETE with the token in both headers.
      URL url = new URL("http://localhost:8088/ws/v1/cluster/delegation-token");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      conn.setRequestProperty(DelegationTokenHeader, token);
      conn.setRequestProperty(RMWebServices.DELEGATION_TOKEN_HEADER, token);
      setupConn(conn, "DELETE", null, null);
      try {
        conn.getInputStream();
        fail("Cancelling delegation tokens should not be allowed with token auth");
      } catch (IOException e) {
        assertEquals(Status.FORBIDDEN.getStatusCode(), conn.getResponseCode());
      }
    }

    APIUtilityVerifier UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * A request without credentials must get 401; the same request with a valid
     * delegation token must succeed, and the submitted app must be owned by the
     * token's owner ("client"), not the renewer ("test").
     */
    @Test
    public void testDelegationTokenAuth() throws Exception {
      final String token = getDelegationToken("test");
      ApplicationSubmissionContextInfo app = new ApplicationSubmissionContextInfo();
      String appid = "application_123_0";
      app.setApplicationId(appid);
      String requestBody = getMarshalledAppInfo(app);
      URL url = new URL("http://localhost:8088/ws/v1/cluster/apps");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      // Consistency fix: use the MediaType constant instead of the raw
      // "application/xml" literal used nowhere else in this class.
      setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);
      try {
        conn.getInputStream();
        // Clearer message than the original "we should not be here".
        fail("App submission without credentials should fail with 401");
      } catch (IOException e) {
        assertEquals(Status.UNAUTHORIZED.getStatusCode(), conn.getResponseCode());
      }
      conn = (HttpURLConnection) url.openConnection();
      conn.setRequestProperty(DelegationTokenHeader, token);
      setupConn(conn, "POST", MediaType.APPLICATION_XML, requestBody);
      conn.getInputStream();
      boolean appExists = rm.getRMContext().getRMApps()
          .containsKey(ConverterUtils.toApplicationId(appid));
      assertTrue(appExists);
      RMApp actualApp = rm.getRMContext().getRMApps()
          .get(ConverterUtils.toApplicationId(appid));
      String owner = actualApp.getUser();
      assertEquals("client", owner);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesDelegationTokens

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // Reviewer notes (code unchanged; restyle avoided because the four nested
    // anonymous Callables are strictly order-dependent):
    // 1. For every (accept, content-type) media-type pair: without Kerberos,
    //    delegate to verifySimpleAuthRenew and skip the rest.
    // 2. As the token owner ("client"): create a token (renewer = client2),
    //    then verify the OWNER may NOT renew it (403).
    // 3. As the renewer ("client2"): renew twice, asserting the expiration
    //    time strictly increases each time (Thread.sleep(1000) between renews
    //    makes the second expiration measurably later).
    // 4. As an unrelated user ("client3"): renewal must be 403.
    // 5. Without any auth: a bogus token body must be 400 BAD_REQUEST.
    //    NOTE(review): the XML branch builds body as "" + token + "" with no
    //    tags — presumably XML markup was lost in minification; confirm
    //    against the original source before reuse.
    @Test public void testRenewDelegationToken() throws Exception { client().addFilter(new LoggingFilter(System.out)); rm.start(); final String renewer="client2"; this.client().addFilter(new LoggingFilter(System.out)); final DelegationToken dummyToken=new DelegationToken(); dummyToken.setRenewer(renewer); String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML}; for ( final String mediaType : mediaTypes) { for ( final String contentType : mediaTypes) { if (isKerberosAuth == false) { verifySimpleAuthRenew(mediaType,contentType); continue; } final DelegationToken responseToken=KerberosTestUtils.doAsClient(new Callable(){ @Override public DelegationToken call() throws Exception { ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").accept(contentType).entity(dummyToken,mediaType).post(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); DelegationToken tok=getDelegationTokenFromResponse(response); assertFalse(tok.getToken().isEmpty()); String body=generateRenewTokenBody(mediaType,tok.getToken()); response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,tok.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class); assertEquals(Status.FORBIDDEN,response.getClientResponseStatus()); return tok; } } ); KerberosTestUtils.doAs(renewer,new Callable(){ @Override public DelegationToken call() throws Exception { long oldExpirationTime=Time.now(); assertValidRMToken(responseToken.getToken()); String body=generateRenewTokenBody(mediaType,responseToken.getToken()); ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); DelegationToken 
tok=getDelegationTokenFromResponse(response); String message="Expiration time not as expected: old = " + oldExpirationTime + "; new = "+ tok.getNextExpirationTime(); assertTrue(message,tok.getNextExpirationTime() > oldExpirationTime); oldExpirationTime=tok.getNextExpirationTime(); Thread.sleep(1000); response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); tok=getDelegationTokenFromResponse(response); message="Expiration time not as expected: old = " + oldExpirationTime + "; new = "+ tok.getNextExpirationTime(); assertTrue(message,tok.getNextExpirationTime() > oldExpirationTime); return tok; } } ); KerberosTestUtils.doAs("client3",new Callable(){ @Override public DelegationToken call() throws Exception { String body=generateRenewTokenBody(mediaType,responseToken.getToken()); ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").header(yarnTokenHeader,responseToken.getToken()).accept(contentType).entity(body,mediaType).post(ClientResponse.class); assertEquals(Status.FORBIDDEN,response.getClientResponseStatus()); return null; } } ); KerberosTestUtils.doAsClient(new Callable(){ @Override public Void call() throws Exception { String token="TEST_TOKEN_STRING"; String body=""; if (mediaType.equals(MediaType.APPLICATION_JSON)) { body="{\"token\": \"" + token + "\" }"; } else { body="" + token + ""; } ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").path("expiration").accept(contentType).entity(body,mediaType).post(ClientResponse.class); assertEquals(Status.BAD_REQUEST,response.getClientResponseStatus()); return null; } } ); } } rm.stop(); return; }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Reviewer notes (code unchanged; restyle avoided — the anonymous Callables
    // share tokens across steps and are strictly order-dependent):
    // For every (accept, content-type) media-type pair:
    // 1. Owner ("client") creates a token and cancels it himself — 200 OK,
    //    then assertTokenCancelled confirms it is gone.
    // 2. Owner creates a second token; the renewer ("client2") cancels it —
    //    also allowed (200 OK).
    // 3. Owner creates a third token; an unrelated user ("client3") tries to
    //    cancel — 403 FORBIDDEN and the token remains valid.
    // 4. testCancelTokenBadRequests covers malformed cancel requests.
    // Without Kerberos the whole test delegates to verifySimpleAuthCancel.
    @Test public void testCancelDelegationToken() throws Exception { rm.start(); this.client().addFilter(new LoggingFilter(System.out)); if (isKerberosAuth == false) { verifySimpleAuthCancel(); return; } final DelegationToken dtoken=new DelegationToken(); String renewer="client2"; dtoken.setRenewer(renewer); String[] mediaTypes={MediaType.APPLICATION_JSON,MediaType.APPLICATION_XML}; for ( final String mediaType : mediaTypes) { for ( final String contentType : mediaTypes) { KerberosTestUtils.doAsClient(new Callable(){ @Override public Void call() throws Exception { ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").accept(contentType).entity(dtoken,mediaType).post(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); DelegationToken tok=getDelegationTokenFromResponse(response); response=resource().path("ws").path("v1").path("cluster").path("delegation-token").header(yarnTokenHeader,tok.getToken()).accept(contentType).delete(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); assertTokenCancelled(tok.getToken()); return null; } } ); final DelegationToken tmpToken=KerberosTestUtils.doAsClient(new Callable(){ @Override public DelegationToken call() throws Exception { ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").accept(contentType).entity(dtoken,mediaType).post(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); DelegationToken tok=getDelegationTokenFromResponse(response); return tok; } } ); KerberosTestUtils.doAs(renewer,new Callable(){ @Override public Void call() throws Exception { ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").header(yarnTokenHeader,tmpToken.getToken()).accept(contentType).delete(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); assertTokenCancelled(tmpToken.getToken()); return 
null; } } ); final DelegationToken tmpToken2=KerberosTestUtils.doAsClient(new Callable(){ @Override public DelegationToken call() throws Exception { ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").accept(contentType).entity(dtoken,mediaType).post(ClientResponse.class); assertEquals(Status.OK,response.getClientResponseStatus()); DelegationToken tok=getDelegationTokenFromResponse(response); return tok; } } ); KerberosTestUtils.doAs("client3",new Callable(){ @Override public Void call() throws Exception { ClientResponse response=resource().path("ws").path("v1").path("cluster").path("delegation-token").header(yarnTokenHeader,tmpToken2.getToken()).accept(contentType).delete(ClientResponse.class); assertEquals(Status.FORBIDDEN,response.getClientResponseStatus()); assertValidRMToken(tmpToken2.getToken()); return null; } } ); testCancelTokenBadRequests(mediaType,contentType); } } rm.stop(); return; }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesFairScheduler

    InternalCallVerifier EqualityVerifier 
    /** Fetches /ws/v1/cluster/scheduler as JSON and validates the payload. */
    @Test
    public void testClusterScheduler() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("scheduler").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyClusterScheduler(body);
    }

    InternalCallVerifier EqualityVerifier 
    /** Same as testClusterScheduler, but with a trailing slash on the path. */
    @Test
    public void testClusterSchedulerSlash() throws JSONException, Exception {
      WebResource web = resource();
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("scheduler/").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      verifyClusterScheduler(body);
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebServicesNodes

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting an unknown node id must yield 404 with a three-field
     * RemoteException JSON body; verifyNonexistNodeException checks its fields.
     */
    @Test
    public void testNonexistNode() throws JSONException, Exception {
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("nodes")
            .path("node_invalid:99").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on non-existent nodeid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String message = remoteEx.getString("message");
        String type = remoteEx.getString("exception");
        String classname = remoteEx.getString("javaClassName");
        verifyNonexistNodeException(message, type, classname);
      } finally {
        rm.stop();
      }
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Querying nodes with a bogus state must produce 400 with an
     * IllegalArgumentException RemoteException body naming the bad constant.
     */
    @Test
    public void testNodesQueryStateInvalid() throws JSONException, Exception {
      WebResource web = resource();
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      try {
        web.path("ws").path("v1").path("cluster").path("nodes")
            .queryParam("states", "BOGUSSTATE")
            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
        fail("should have thrown exception querying invalid state");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String message = remoteEx.getString("message");
        String type = remoteEx.getString("exception");
        String classname = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringContains("exception message",
            "org.apache.hadoop.yarn.api.records.NodeState.BOGUSSTATE", message);
        WebServicesTestUtils.checkStringMatch("exception type",
            "IllegalArgumentException", type);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "java.lang.IllegalArgumentException", classname);
      } finally {
        rm.stop();
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** With no UNHEALTHY nodes registered, the "nodes" entry must be null. */
    @Test
    public void testNodesQueryHealthyFalse() throws JSONException, Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1235", 5121);
      rm.sendNodeStarted(nm1);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").queryParam("states", "UNHEALTHY")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("nodes is not null", JSONObject.NULL, body.get("nodes"));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** Fetches a single node as XML and verifies the lone <node> element. */
    @Test
    public void testSingleNodesXML() throws JSONException, Exception {
      rm.start();
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").path("h1:1234").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xml = resp.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder builder = dbf.newDocumentBuilder();
      InputSource src = new InputSource();
      src.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(src);
      NodeList nodes = dom.getElementsByTagName("node");
      assertEquals("incorrect number of elements", 1, nodes.getLength());
      verifyNodesXML(nodes, nm1);
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * A LOST node fetched by id must report an empty nodeHTTPAddress and the
     * state recorded for it in the inactive-nodes map.
     */
    @Test
    public void testSingleNodeQueryStateLost() throws JSONException, Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1234", 5120);
      rm.sendNodeStarted(nm1);
      rm.sendNodeStarted(nm2);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
      rm.sendNodeLost(nm1);
      rm.sendNodeLost(nm2);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").path("h2:1234").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      JSONObject info = body.getJSONObject("node");
      String id = info.get("id").toString();
      assertEquals("Incorrect Node Information.", "h2:1234", id);
      // Lost nodes are keyed by host only in the inactive map.
      RMNode rmNode = rm.getRMContext().getInactiveRMNodes().get("h2");
      WebServicesTestUtils.checkStringMatch("nodeHTTPAddress", "",
          info.getString("nodeHTTPAddress"));
      WebServicesTestUtils.checkStringMatch("state",
          rmNode.getState().toString(), info.getString("state"));
    }

    InternalCallVerifier EqualityVerifier 
    /** Querying with every NodeState listed returns all three nodes. */
    @Test
    public void testQueryAll() throws Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1235", 5121);
      MockNM nm3 = rm.registerNode("h3:1236", 5122);
      rm.sendNodeStarted(nm1);
      rm.sendNodeStarted(nm3);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
      rm.sendNodeLost(nm3);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes")
          .queryParam("states", Joiner.on(',').join(EnumSet.allOf(NodeState.class)))
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      JSONObject nodes = body.getJSONObject("nodes");
      assertEquals("incorrect number of elements", 1, nodes.length());
      JSONArray nodeArray = nodes.getJSONArray("node");
      assertEquals("incorrect number of elements", 3, nodeArray.length());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Both lost nodes must show up under states=LOST, each with an empty
     * nodeHTTPAddress and the state held in the inactive-nodes map.
     */
    @Test
    public void testNodesQueryStateLost() throws JSONException, Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1234", 5120);
      rm.sendNodeStarted(nm1);
      rm.sendNodeStarted(nm2);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.RUNNING);
      rm.sendNodeLost(nm1);
      rm.sendNodeLost(nm2);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").queryParam("states", NodeState.LOST.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      JSONObject nodes = body.getJSONObject("nodes");
      assertEquals("incorrect number of elements", 1, nodes.length());
      JSONArray nodeArray = nodes.getJSONArray("node");
      assertEquals("incorrect number of elements", 2, nodeArray.length());
      for (int idx = 0; idx < nodeArray.length(); ++idx) {
        JSONObject info = nodeArray.getJSONObject(idx);
        String host = info.get("id").toString().split(":")[0];
        RMNode rmNode = rm.getRMContext().getInactiveRMNodes().get(host);
        WebServicesTestUtils.checkStringMatch("nodeHTTPAddress", "",
            info.getString("nodeHTTPAddress"));
        WebServicesTestUtils.checkStringMatch("state",
            rmNode.getState().toString(), info.getString("state"));
      }
    }

    InternalCallVerifier EqualityVerifier 
    /** states=NEW must return exactly the one node still in NEW state. */
    @Test
    public void testNodesQueryNew() throws JSONException, Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1235", 5121);
      rm.sendNodeStarted(nm1);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").queryParam("states", NodeState.NEW.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject nodes = body.getJSONObject("nodes");
      assertEquals("incorrect number of elements", 1, nodes.length());
      JSONArray nodeArray = nodes.getJSONArray("node");
      assertEquals("incorrect number of elements", 1, nodeArray.length());
      JSONObject info = nodeArray.getJSONObject(0);
      verifyNodeInfo(info, nm2);
    }

    InternalCallVerifier EqualityVerifier 
    /** A state matching no node (DECOMMISSIONED) yields a null "nodes" entry. */
    @Test
    public void testNodesQueryStateNone() throws JSONException, Exception {
      WebResource web = resource();
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes")
          .queryParam("states", NodeState.DECOMMISSIONED.toString())
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      assertEquals("nodes is not null", JSONObject.NULL, body.get("nodes"));
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** XML node listing with two registered nodes: one <nodes>, two <node>. */
    @Test
    public void testNodes2XML() throws JSONException, Exception {
      rm.start();
      WebResource web = resource();
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xml = resp.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder builder = dbf.newDocumentBuilder();
      InputSource src = new InputSource();
      src.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(src);
      NodeList nodesApps = dom.getElementsByTagName("nodes");
      assertEquals("incorrect number of elements", 1, nodesApps.getLength());
      NodeList nodes = dom.getElementsByTagName("node");
      assertEquals("incorrect number of elements", 2, nodes.getLength());
      rm.stop();
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * XML variant of the unknown-node test: 404 with a RemoteException XML
     * body whose message/exception/javaClassName fields are validated.
     */
    @Test
    public void testNonexistNodeXML() throws JSONException, Exception {
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("nodes")
            .path("node_invalid:99").accept(MediaType.APPLICATION_XML)
            .get(JSONObject.class);
        fail("should have thrown exception on non-existent nodeid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
        String msg = resp.getEntity(String.class);
        System.out.println(msg);
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder builder = dbf.newDocumentBuilder();
        InputSource src = new InputSource();
        src.setCharacterStream(new StringReader(msg));
        Document dom = builder.parse(src);
        NodeList nodes = dom.getElementsByTagName("RemoteException");
        Element element = (Element) nodes.item(0);
        String message = WebServicesTestUtils.getXmlString(element, "message");
        String type = WebServicesTestUtils.getXmlString(element, "exception");
        String classname =
            WebServicesTestUtils.getXmlString(element, "javaClassName");
        verifyNonexistNodeException(message, type, classname);
      } finally {
        rm.stop();
      }
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * The default (no state filter) node listing must include a node that has
     * transitioned to UNHEALTHY via a failing health report.
     */
    @Test
    public void testNodesDefaultWithUnHealthyNode() throws JSONException, Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1235", 5121);
      rm.sendNodeStarted(nm1);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
      // Bring up a third node, then drive it UNHEALTHY with a bad health report.
      MockNM nm3 = rm.registerNode("h3:1236", 5122);
      rm.NMwaitForState(nm3.getNodeId(), NodeState.NEW);
      rm.sendNodeStarted(nm3);
      rm.NMwaitForState(nm3.getNodeId(), NodeState.RUNNING);
      RMNodeImpl node =
          (RMNodeImpl) rm.getRMContext().getRMNodes().get(nm3.getNodeId());
      NodeHealthStatus nodeHealth = NodeHealthStatus.newInstance(false,
          "test health report", System.currentTimeMillis());
      node.handle(new RMNodeStatusEvent(nm3.getNodeId(), nodeHealth,
          new ArrayList(), null, null));
      rm.NMwaitForState(nm3.getNodeId(), NodeState.UNHEALTHY);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").accept(MediaType.APPLICATION_JSON)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject nodes = body.getJSONObject("nodes");
      assertEquals("incorrect number of elements", 1, nodes.length());
      JSONArray nodeArray = nodes.getJSONArray("node");
      assertEquals("incorrect number of elements", 3, nodeArray.length());
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * A malformed node id (missing port) must yield 400 with an
     * IllegalArgumentException RemoteException body.
     */
    @Test
    public void testInvalidNode() throws JSONException, Exception {
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("nodes")
            .path("node_invalid_foo").accept(MediaType.APPLICATION_JSON)
            .get(JSONObject.class);
        fail("should have thrown exception on non-existent nodeid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.BAD_REQUEST, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String message = remoteEx.getString("message");
        String type = remoteEx.getString("exception");
        String classname = remoteEx.getString("javaClassName");
        WebServicesTestUtils.checkStringMatch("exception message",
            "Invalid NodeId \\[node_invalid_foo\\]. Expected host:port", message);
        WebServicesTestUtils.checkStringMatch("exception type",
            "IllegalArgumentException", type);
        WebServicesTestUtils.checkStringMatch("exception classname",
            "java.lang.IllegalArgumentException", classname);
      } finally {
        rm.stop();
      }
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /** XML node listing with a single registered node, verified element-wise. */
    @Test
    public void testNodesXML() throws JSONException, Exception {
      rm.start();
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").accept(MediaType.APPLICATION_XML)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_XML_TYPE, resp.getType());
      String xml = resp.getEntity(String.class);
      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
      DocumentBuilder builder = dbf.newDocumentBuilder();
      InputSource src = new InputSource();
      src.setCharacterStream(new StringReader(xml));
      Document dom = builder.parse(src);
      NodeList nodesApps = dom.getElementsByTagName("nodes");
      assertEquals("incorrect number of elements", 1, nodesApps.getLength());
      NodeList nodes = dom.getElementsByTagName("node");
      assertEquals("incorrect number of elements", 1, nodes.getLength());
      verifyNodesXML(nodes, nm1);
      rm.stop();
    }

    InternalCallVerifier EqualityVerifier 
    /** states=running (lowercase is accepted) returns only the started node. */
    @Test
    public void testNodesQueryRunning() throws JSONException, Exception {
      WebResource web = resource();
      MockNM nm1 = rm.registerNode("h1:1234", 5120);
      MockNM nm2 = rm.registerNode("h2:1235", 5121);
      rm.sendNodeStarted(nm1);
      rm.NMwaitForState(nm1.getNodeId(), NodeState.RUNNING);
      rm.NMwaitForState(nm2.getNodeId(), NodeState.NEW);
      ClientResponse resp = web.path("ws").path("v1").path("cluster")
          .path("nodes").queryParam("states", "running")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
      JSONObject body = resp.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, body.length());
      JSONObject nodes = body.getJSONObject("nodes");
      assertEquals("incorrect number of elements", 1, nodes.length());
      JSONArray nodeArray = nodes.getJSONArray("node");
      assertEquals("incorrect number of elements", 1, nodeArray.length());
    }

    UtilityVerifier InternalCallVerifier EqualityVerifier HybridVerifier 
    /**
     * Unknown node id with no Accept header: the default response is still
     * JSON, 404, with the standard three-field RemoteException body.
     */
    @Test
    public void testNonexistNodeDefault() throws JSONException, Exception {
      rm.registerNode("h1:1234", 5120);
      rm.registerNode("h2:1235", 5121);
      WebResource web = resource();
      try {
        web.path("ws").path("v1").path("cluster").path("nodes")
            .path("node_invalid:99").get(JSONObject.class);
        fail("should have thrown exception on non-existent nodeid");
      } catch (UniformInterfaceException ue) {
        ClientResponse resp = ue.getResponse();
        assertEquals(Status.NOT_FOUND, resp.getClientResponseStatus());
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        JSONObject body = resp.getEntity(JSONObject.class);
        JSONObject remoteEx = body.getJSONObject("RemoteException");
        assertEquals("incorrect number of elements", 3, remoteEx.length());
        String message = remoteEx.getString("message");
        String type = remoteEx.getString("exception");
        String classname = remoteEx.getString("javaClassName");
        verifyNonexistNodeException(message, type, classname);
      } finally {
        rm.stop();
      }
    }

    Class: org.apache.hadoop.yarn.server.resourcemanager.webapp.TestRMWebappAuthentication

    APIUtilityVerifier UtilityVerifier EqualityVerifier HybridVerifier 
    /**
     * The cluster web page is reachable without credentials; further anonymous
     * access checks branch on whether Kerberos security is enabled.
     */
    @Test
    public void testSimpleAuth() throws Exception {
      rm.start();
      URL url = new URL("http://localhost:8088/cluster");
      HttpURLConnection conn = (HttpURLConnection) url.openConnection();
      try {
        conn.getInputStream();
        assertEquals(Status.OK.getStatusCode(), conn.getResponseCode());
      } catch (Exception e) {
        fail("Fetching url failed");
      }
      if (UserGroupInformation.isSecurityEnabled()) {
        testAnonymousKerberosUser();
      } else {
        testAnonymousSimpleUser();
      }
      rm.stop();
    }

    Class: org.apache.hadoop.yarn.server.timeline.TestGenericObjectMapper

    EqualityVerifier 
    /**
     * Round-trips ints, longs, doubles, strings, booleans, lists and maps
     * through GenericObjectMapper; int-sized longs must read back as Integers.
     */
    @Test
    public void testValueTypes() throws IOException {
      verify(Integer.MAX_VALUE);
      verify(Integer.MIN_VALUE);
      // Longs within int range come back as Integer values.
      assertEquals(Integer.MAX_VALUE, GenericObjectMapper.read(
          GenericObjectMapper.write((long) Integer.MAX_VALUE)));
      assertEquals(Integer.MIN_VALUE, GenericObjectMapper.read(
          GenericObjectMapper.write((long) Integer.MIN_VALUE)));
      verify((long) Integer.MAX_VALUE + 1l);
      verify((long) Integer.MIN_VALUE - 1l);
      verify(Long.MAX_VALUE);
      verify(Long.MIN_VALUE);
      assertEquals(42, GenericObjectMapper.read(GenericObjectMapper.write(42l)));
      verify(42);
      verify(1.23);
      verify("abc");
      verify(true);
      List sample = new ArrayList();
      sample.add("123");
      sample.add("abc");
      verify(sample);
      Map pairs = new HashMap();
      pairs.put("k1", "v1");
      pairs.put("k2", "v2");
      verify(pairs);
    }

    Class: org.apache.hadoop.yarn.server.timeline.TestLeveldbTimelineStore

    InternalCallVerifier EqualityVerifier 
    /** Start-time cache sizes default to 10000 and honor their config keys. */
    @Test
    public void testCacheSizes() {
      Configuration conf = new Configuration();
      assertEquals(10000, LeveldbTimelineStore.getStartTimeReadCacheSize(conf));
      assertEquals(10000, LeveldbTimelineStore.getStartTimeWriteCacheSize(conf));
      conf.setInt(
          YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
          10001);
      assertEquals(10001, LeveldbTimelineStore.getStartTimeReadCacheSize(conf));
      // Fresh config so the read-cache override does not leak into this check.
      conf = new Configuration();
      conf.setInt(
          YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
          10002);
      assertEquals(10002, LeveldbTimelineStore.getStartTimeWriteCacheSize(conf));
    }

    EqualityVerifier 
    /** The store must report its four entity types in sorted order. */
    @Test
    public void testGetEntityTypes() throws IOException {
      List entityTypes = ((LeveldbTimelineStore) store).getEntityTypes();
      assertEquals(4, entityTypes.size());
      assertEquals(entityType1, entityTypes.get(0));
      assertEquals(entityType2, entityTypes.get(1));
      assertEquals(entityType4, entityTypes.get(2));
      assertEquals(entityType5, entityTypes.get(3));
    }

    EqualityVerifier 
    /**
     * Exercises deleteNextEntity and discardOldEntities: deleting by start
     * time, verifying surviving entities, then discarding by age thresholds.
     */
    @Test
    public void testDeleteEntities() throws IOException, InterruptedException {
      assertEquals(2, getEntities("type_1").size());
      assertEquals(1, getEntities("type_2").size());
      // No entity has start time 122, so nothing is deleted.
      assertEquals(false,
          deleteNextEntity(entityType1, writeReverseOrderedLong(122l)));
      assertEquals(2, getEntities("type_1").size());
      assertEquals(1, getEntities("type_2").size());
      // Start time 123 matches one entity, which gets deleted.
      assertEquals(true,
          deleteNextEntity(entityType1, writeReverseOrderedLong(123l)));
      List entities = getEntities("type_2");
      assertEquals(1, entities.size());
      verifyEntityInfo(entityId2, entityType2, events2,
          Collections.singletonMap(entityType1,
              Collections.singleton(entityId1b)),
          EMPTY_PRIMARY_FILTERS, EMPTY_MAP, entities.get(0));
      entities = getEntitiesWithPrimaryFilter("type_1", userFilter);
      assertEquals(1, entities.size());
      verifyEntityInfo(entityId1b, entityType1, events1, EMPTY_REL_ENTITIES,
          primaryFilters, otherInfo, entities.get(0));
      // Discarding with a negative threshold keeps recent entities.
      ((LeveldbTimelineStore) store).discardOldEntities(-123l);
      assertEquals(1, getEntities("type_1").size());
      assertEquals(0, getEntities("type_2").size());
      assertEquals(3, ((LeveldbTimelineStore) store).getEntityTypes().size());
      // Discarding at 123 removes everything that remains.
      ((LeveldbTimelineStore) store).discardOldEntities(123l);
      assertEquals(0, getEntities("type_1").size());
      assertEquals(0, getEntities("type_2").size());
      assertEquals(0, ((LeveldbTimelineStore) store).getEntityTypes().size());
      assertEquals(0, getEntitiesWithPrimaryFilter("type_1", userFilter).size());
    }

    EqualityVerifier 
    /**
     * Verifies interaction of fromTs queries with deletion: after
     * discardOldEntities all fromTs queries pinned to the pre-deletion
     * timestamp return empty, and reloading the test data does NOT make those
     * pinned queries match again (entities inserted after the fromTs snapshot
     * stay invisible), while unpinned queries see the reloaded data.
     */
    @Test public void testFromTsWithDeletion() throws IOException, InterruptedException { long l=System.currentTimeMillis(); assertEquals(2,getEntitiesFromTs("type_1",l).size()); assertEquals(1,getEntitiesFromTs("type_2",l).size()); assertEquals(2,getEntitiesFromTsWithPrimaryFilter("type_1",userFilter,l).size()); ((LeveldbTimelineStore)store).discardOldEntities(123l); assertEquals(0,getEntitiesFromTs("type_1",l).size()); assertEquals(0,getEntitiesFromTs("type_2",l).size()); assertEquals(0,getEntitiesFromTsWithPrimaryFilter("type_1",userFilter,l).size()); assertEquals(0,getEntities("type_1").size()); assertEquals(0,getEntities("type_2").size()); assertEquals(0,getEntitiesFromTsWithPrimaryFilter("type_1",userFilter,l).size()); loadTestData(); assertEquals(0,getEntitiesFromTs("type_1",l).size()); assertEquals(0,getEntitiesFromTs("type_2",l).size()); assertEquals(0,getEntitiesFromTsWithPrimaryFilter("type_1",userFilter,l).size()); assertEquals(2,getEntities("type_1").size()); assertEquals(1,getEntities("type_2").size()); assertEquals(2,getEntitiesWithPrimaryFilter("type_1",userFilter).size()); }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that the leveldb database directory exists on the local file
     * system and was created with the store's LEVELDB_DIR_UMASK permission.
     */
    @Test
    public void testRootDirPermission() throws IOException {
        FileSystem localFs = FileSystem.getLocal(new YarnConfiguration());
        Path dbPath = new Path(fsPath.getAbsolutePath(), LeveldbTimelineStore.FILENAME);
        FileStatus status = localFs.getFileStatus(dbPath);
        assertNotNull(status);
        assertEquals(LeveldbTimelineStore.LEVELDB_DIR_UMASK, status.getPermission());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies that deletion keeps primary-filter indexes consistent: an
     * entity inserted under a second primary filter ("user"="otheruser") is
     * visible only via that filter, survives a no-op discard (-123), and all
     * filter views become empty once discardOldEntities(123) removes
     * everything. NOTE(review): order-dependent — each block observes the
     * state produced by the previous call.
     */
    @Test public void testDeleteEntitiesPrimaryFilters() throws IOException, InterruptedException { Map primaryFilter=Collections.singletonMap("user",Collections.singleton((Object)"otheruser")); TimelineEntities atsEntities=new TimelineEntities(); atsEntities.setEntities(Collections.singletonList(createEntity(entityId1b,entityType1,789l,Collections.singletonList(ev2),null,primaryFilter,null))); TimelinePutResponse response=store.put(atsEntities); assertEquals(0,response.getErrors().size()); NameValuePair pfPair=new NameValuePair("user","otheruser"); List entities=getEntitiesWithPrimaryFilter("type_1",pfPair); assertEquals(1,entities.size()); verifyEntityInfo(entityId1b,entityType1,Collections.singletonList(ev2),EMPTY_REL_ENTITIES,primaryFilter,EMPTY_MAP,entities.get(0)); entities=getEntitiesWithPrimaryFilter("type_1",userFilter); assertEquals(2,entities.size()); verifyEntityInfo(entityId1,entityType1,events1,EMPTY_REL_ENTITIES,primaryFilters,otherInfo,entities.get(0)); verifyEntityInfo(entityId1b,entityType1,events1,EMPTY_REL_ENTITIES,primaryFilters,otherInfo,entities.get(1)); ((LeveldbTimelineStore)store).discardOldEntities(-123l); assertEquals(1,getEntitiesWithPrimaryFilter("type_1",pfPair).size()); assertEquals(2,getEntitiesWithPrimaryFilter("type_1",userFilter).size()); ((LeveldbTimelineStore)store).discardOldEntities(123l); assertEquals(0,getEntities("type_1").size()); assertEquals(0,getEntities("type_2").size()); assertEquals(0,((LeveldbTimelineStore)store).getEntityTypes().size()); assertEquals(0,getEntitiesWithPrimaryFilter("type_1",pfPair).size()); assertEquals(0,getEntitiesWithPrimaryFilter("type_1",userFilter).size()); }

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies schema-version handling across restarts: a stored version with
     * the same major number is treated as compatible and rewritten to the
     * current version on restart, while an incremented major version makes the
     * restart fail with a ServiceStateException mentioning
     * "Incompatible version for timeline store".
     */
    @Test public void testCheckVersion() throws IOException { LeveldbTimelineStore dbStore=(LeveldbTimelineStore)store; Version defaultVersion=dbStore.getCurrentVersion(); Assert.assertEquals(defaultVersion,dbStore.loadVersion()); Version compatibleVersion=Version.newInstance(defaultVersion.getMajorVersion(),defaultVersion.getMinorVersion() + 2); dbStore.storeVersion(compatibleVersion); Assert.assertEquals(compatibleVersion,dbStore.loadVersion()); restartTimelineStore(); dbStore=(LeveldbTimelineStore)store; Assert.assertEquals(defaultVersion,dbStore.loadVersion()); Version incompatibleVersion=Version.newInstance(defaultVersion.getMajorVersion() + 1,defaultVersion.getMinorVersion()); dbStore.storeVersion(incompatibleVersion); try { restartTimelineStore(); Assert.fail("Incompatible version, should expect fail here."); } catch ( ServiceStateException e) { Assert.assertTrue("Exception message mismatch",e.getMessage().contains("Incompatible version for timeline store")); } }

    Class: org.apache.hadoop.yarn.server.timeline.webapp.TestCrossOriginFilterInitializer

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that getFilterParameters keeps only the configuration entries
     * under the cross-origin filter prefix (both flat and dotted keys) and
     * drops keys outside that prefix.
     */
    @Test
    public void testGetFilterParameters() {
        Configuration conf = new Configuration();
        conf.set(CrossOriginFilterInitializer.PREFIX + "rootparam", "rootvalue");
        conf.set(CrossOriginFilterInitializer.PREFIX + "nested.param", "nestedvalue");
        conf.set("outofscopeparam", "outofscopevalue");
        Map filterParameters = CrossOriginFilterInitializer.getFilterParameters(conf);
        String rootvalue =
            filterParameters.get(CrossOriginFilterInitializer.PREFIX + "rootparam");
        Assert.assertEquals("Could not find filter parameter", "rootvalue", rootvalue);
        String nestedvalue =
            filterParameters.get(CrossOriginFilterInitializer.PREFIX + "nested.param");
        Assert.assertEquals("Could not find filter parameter", "nestedvalue", nestedvalue);
        String outofscopeparam = filterParameters.get("outofscopeparam");
        Assert.assertNull("Found unexpected value in filter parameters", outofscopeparam);
    }

    Class: org.apache.hadoop.yarn.server.timeline.webapp.TestTimelineWebServices

    InternalCallVerifier EqualityVerifier 
    /**
     * A quoted numeric-looking primary filter value ("123abc") must be matched
     * as a string, returning the fixture entities.
     */
    @Test
    public void testPrimaryFilterNumericStringWithQuotes() {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("primaryFilter", "other:\"123abc\"")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        verifyEntities(response.getEntity(TimelineEntities.class));
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * An unquoted numeric-looking primary filter value ("123abc") does not
     * match the stored string values, so no entities are returned.
     */
    @Test
    public void testPrimaryFilterNumericString() {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("primaryFilter", "other:123abc")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        assertEquals(0, response.getEntity(TimelineEntities.class).getEntities().size());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Entities inserted before the fixture's beforeTime are excluded by a
     * fromTs pinned to that time, while a current-time fromTs returns both
     * type_1 entities.
     */
    @Test
    public void testFromTs() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("fromTs", Long.toString(beforeTime))
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        assertEquals(0, response.getEntity(TimelineEntities.class).getEntities().size());
        response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("fromTs", Long.toString(System.currentTimeMillis()))
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        assertEquals(2, response.getEntity(TimelineEntities.class).getEntities().size());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * With YARN ACLs enabled, listing entities of a type returns only those
     * owned by the requesting user: "tester" posts "test id 4" and "other"
     * posts "test id 5" under the same type; querying as "other" yields only
     * "test id 5". The previous admin ACLs manager is restored in finally.
     */
    @Test public void testGetEntitiesWithYarnACLsEnabled(){ AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager); try { TimelineEntities entities=new TimelineEntities(); TimelineEntity entity=new TimelineEntity(); entity.setEntityId("test id 4"); entity.setEntityType("test type 4"); entity.setStartTime(System.currentTimeMillis()); entities.addEntity(entity); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); entities=new TimelineEntities(); entity=new TimelineEntity(); entity.setEntityId("test id 5"); entity.setEntityType("test type 4"); entity.setStartTime(System.currentTimeMillis()); entities.addEntity(entity); r=resource(); response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").path("test type 4").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entities=response.getEntity(TimelineEntities.class); assertEquals(1,entities.getEntities().size()); assertEquals("test type 4",entities.getEntities().get(0).getEntityType()); assertEquals("test id 5",entities.getEntities().get(0).getEntityId()); } finally { timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * A primary filter whose value exceeds Integer.MAX_VALUE must be parsed as
     * a long and match the fixture entities.
     */
    @Test
    public void testPrimaryFilterLong() {
        WebResource webResource = resource();
        String filterValue = "long:" + Long.toString((long) Integer.MAX_VALUE + 1l);
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("primaryFilter", filterValue)
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        verifyEntities(response.getEntity(TimelineEntities.class));
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Fetching events for entity id_1 returns one entity bucket holding two
     * events in reverse chronological order: end_event (ts 456, one info
     * entry) then start_event (ts 123, no info).
     */
    @Test
    public void testGetEvents() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1").path("events")
            .queryParam("entityId", "id_1")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        TimelineEvents events = response.getEntity(TimelineEvents.class);
        Assert.assertNotNull(events);
        Assert.assertEquals(1, events.getAllEvents().size());
        TimelineEvents.EventsOfOneEntity partEvents = events.getAllEvents().get(0);
        Assert.assertEquals(2, partEvents.getEvents().size());
        // Newest event first.
        TimelineEvent event1 = partEvents.getEvents().get(0);
        Assert.assertEquals(456l, event1.getTimestamp());
        Assert.assertEquals("end_event", event1.getEventType());
        Assert.assertEquals(1, event1.getEventInfo().size());
        TimelineEvent event2 = partEvents.getEvents().get(1);
        Assert.assertEquals(123l, event2.getTimestamp());
        Assert.assertEquals("start_event", event2.getEventType());
        Assert.assertEquals(0, event2.getEventInfo().size());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * With YARN ACLs enabled, the original poster ("tester") can put an entity
     * without errors, but a different user ("other") re-posting the same
     * entity gets exactly one ACCESS_DENIED error. The previous admin ACLs
     * manager is restored in finally.
     */
    @Test public void testPostEntitiesWithYarnACLsEnabled() throws Exception { AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager); try { TimelineEntities entities=new TimelineEntities(); TimelineEntity entity=new TimelineEntity(); entity.setEntityId("test id 2"); entity.setEntityType("test type 2"); entity.setStartTime(System.currentTimeMillis()); entities.addEntity(entity); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); TimelinePutResponse putResponse=response.getEntity(TimelinePutResponse.class); Assert.assertNotNull(putResponse); Assert.assertEquals(0,putResponse.getErrors().size()); response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); putResponse=response.getEntity(TimelinePutResponse.class); Assert.assertNotNull(putResponse); Assert.assertEquals(1,putResponse.getErrors().size()); Assert.assertEquals(TimelinePutResponse.TimelinePutError.ACCESS_DENIED,putResponse.getErrors().get(0).getErrorCode()); } finally { timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); } }

    InternalCallVerifier EqualityVerifier 
    /** A plain string primary filter (user:username) matches the fixture entities. */
    @Test
    public void testPrimaryFilterString() {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("primaryFilter", "user:username")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        verifyEntities(response.getEntity(TimelineEntities.class));
    }

    InternalCallVerifier EqualityVerifier 
    /** An unfiltered listing of type_1 returns the standard fixture entities. */
    @Test
    public void testGetEntities() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        verifyEntities(response.getEntity(TimelineEntities.class));
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting fields=events,otherinfo returns all events and other-info for
     * id_1 but no primary filters.
     */
    @Test
    public void testGetEntityFields1() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1").path("id_1")
            .queryParam("fields", "events,otherinfo")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        TimelineEntity entity = response.getEntity(TimelineEntity.class);
        Assert.assertNotNull(entity);
        Assert.assertEquals("id_1", entity.getEntityId());
        Assert.assertEquals("type_1", entity.getEntityType());
        Assert.assertEquals(123l, entity.getStartTime().longValue());
        Assert.assertEquals(2, entity.getEvents().size());
        // Primary filters were not requested, so none are populated.
        Assert.assertEquals(0, entity.getPrimaryFilters().size());
        Assert.assertEquals(4, entity.getOtherInfo().size());
    }

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Posting an entity that uses the reserved ENTITY_OWNER system filter name
     * as a primary filter must be rejected with a single
     * SYSTEM_FILTER_CONFLICT error.
     *
     * Fixes: renamed the misspelled local "putResposne" to "putResponse" and
     * replaced the raw Map/HashMap/HashSet with their generic forms matching
     * TimelineEntity.setPrimaryFilters(Map&lt;String, Set&lt;Object&gt;&gt;).
     */
    @Test
    public void testPostEntitiesWithPrimaryFilter() throws Exception {
        TimelineEntities entities = new TimelineEntities();
        TimelineEntity entity = new TimelineEntity();
        Map<String, Set<Object>> filters = new HashMap<String, Set<Object>>();
        // Deliberately collide with the reserved system filter name.
        filters.put(TimelineStore.SystemFilter.ENTITY_OWNER.toString(),
            new HashSet<Object>());
        entity.setPrimaryFilters(filters);
        entity.setEntityId("test id 6");
        entity.setEntityType("test type 6");
        entity.setStartTime(System.currentTimeMillis());
        entities.addEntity(entity);
        WebResource r = resource();
        ClientResponse response = r.path("ws").path("v1").path("timeline")
            .queryParam("user.name", "tester")
            .accept(MediaType.APPLICATION_JSON)
            .type(MediaType.APPLICATION_JSON)
            .post(ClientResponse.class, entities);
        TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
        Assert.assertEquals(1, putResponse.getErrors().size());
        List<TimelinePutResponse.TimelinePutError> errors = putResponse.getErrors();
        Assert.assertEquals(TimelinePutResponse.TimelinePutError.SYSTEM_FILTER_CONFLICT,
            errors.get(0).getErrorCode());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testGetEntityWithYarnACLsEnabled() throws Exception { AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager); try { TimelineEntities entities=new TimelineEntities(); TimelineEntity entity=new TimelineEntity(); entity.setEntityId("test id 3"); entity.setEntityType("test type 3"); entity.setStartTime(System.currentTimeMillis()); entities.addEntity(entity); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entity=response.getEntity(TimelineEntity.class); Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString())); response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","relatedentities").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entity=response.getEntity(TimelineEntity.class); Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString())); response=r.path("ws").path("v1").path("timeline").path("test type 3").path("test id 3").queryParam("fields","primaryfilters").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); entity=response.getEntity(TimelineEntity.class); Assert.assertNull(entity.getPrimaryFilters().get(TimelineStore.SystemFilter.ENTITY_OWNER.toString())); response=r.path("ws").path("v1").path("timeline").path("test type 
3").path("test id 3").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); assertEquals(ClientResponse.Status.NOT_FOUND,response.getClientResponseStatus()); } finally { timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); } }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Requesting fields=lasteventonly,primaryfilters,relatedentities returns
     * only the most recent event plus all primary filters, and no other-info.
     */
    @Test
    public void testGetEntityFields2() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1").path("id_1")
            .queryParam("fields", "lasteventonly," + "primaryfilters,relatedentities")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        TimelineEntity entity = response.getEntity(TimelineEntity.class);
        Assert.assertNotNull(entity);
        Assert.assertEquals("id_1", entity.getEntityId());
        Assert.assertEquals("type_1", entity.getEntityType());
        Assert.assertEquals(123l, entity.getStartTime().longValue());
        // lasteventonly limits the event list to the single newest event.
        Assert.assertEquals(1, entity.getEvents().size());
        Assert.assertEquals(4, entity.getPrimaryFilters().size());
        Assert.assertEquals(0, entity.getOtherInfo().size());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Paging with fromId is inclusive: starting at id_2 yields one entity,
     * starting at id_1 yields both.
     */
    @Test
    public void testFromId() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("fromId", "id_2")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        assertEquals(1, response.getEntity(TimelineEntities.class).getEntities().size());
        response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("fromId", "id_1")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        assertEquals(2, response.getEntity(TimelineEntities.class).getEntities().size());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * Multiple comma-separated secondary filters (string and integer valued)
     * are ANDed together and still match the fixture entities.
     */
    @Test
    public void testSecondaryFilters() {
        WebResource webResource = resource();
        String filterValue =
            "user:username,appname:" + Integer.toString(Integer.MAX_VALUE);
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("secondaryFilter", filterValue)
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        verifyEntities(response.getEntity(TimelineEntities.class));
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /** The root timeline resource serves the "Timeline API" about document. */
    @Test
    public void testAbout() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        TimelineWebServices.AboutInfo about =
            response.getEntity(TimelineWebServices.AboutInfo.class);
        Assert.assertNotNull(about);
        Assert.assertEquals("Timeline API", about.getAbout());
    }

    APIUtilityVerifier InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Posting without user.name is FORBIDDEN; posting as "tester" succeeds
     * with no errors, and the entity can then be fetched back by type and id.
     *
     * Fix: renamed the misspelled local "putResposne" to "putResponse".
     */
    @Test
    public void testPostEntities() throws Exception {
        TimelineEntities entities = new TimelineEntities();
        TimelineEntity entity = new TimelineEntity();
        entity.setEntityId("test id 1");
        entity.setEntityType("test type 1");
        entity.setStartTime(System.currentTimeMillis());
        entities.addEntity(entity);
        WebResource r = resource();
        // Anonymous put must be rejected.
        ClientResponse response = r.path("ws").path("v1").path("timeline")
            .accept(MediaType.APPLICATION_JSON)
            .type(MediaType.APPLICATION_JSON)
            .post(ClientResponse.class, entities);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        assertEquals(ClientResponse.Status.FORBIDDEN, response.getClientResponseStatus());
        // Authenticated put succeeds with an empty error list.
        response = r.path("ws").path("v1").path("timeline")
            .queryParam("user.name", "tester")
            .accept(MediaType.APPLICATION_JSON)
            .type(MediaType.APPLICATION_JSON)
            .post(ClientResponse.class, entities);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        TimelinePutResponse putResponse = response.getEntity(TimelinePutResponse.class);
        Assert.assertNotNull(putResponse);
        Assert.assertEquals(0, putResponse.getErrors().size());
        // Round-trip: the stored entity is retrievable.
        response = r.path("ws").path("v1").path("timeline")
            .path("test type 1").path("test id 1")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        entity = response.getEntity(TimelineEntity.class);
        Assert.assertNotNull(entity);
        Assert.assertEquals("test id 1", entity.getEntityId());
        Assert.assertEquals("test type 1", entity.getEntityType());
    }

    InternalCallVerifier EqualityVerifier 
    /**
     * With YARN ACLs enabled, the events endpoint returns events only for
     * entities the caller owns: "tester" posts "test id 5" and "other" posts
     * "test id 6" under the same type; querying both ids as "other" yields a
     * single bucket for "test id 6". The previous admin ACLs manager is
     * restored in finally.
     */
    @Test public void testGetEventsWithYarnACLsEnabled(){ AdminACLsManager oldAdminACLsManager=timelineACLsManager.setAdminACLsManager(adminACLsManager); try { TimelineEntities entities=new TimelineEntities(); TimelineEntity entity=new TimelineEntity(); entity.setEntityId("test id 5"); entity.setEntityType("test type 5"); entity.setStartTime(System.currentTimeMillis()); TimelineEvent event=new TimelineEvent(); event.setEventType("event type 1"); event.setTimestamp(System.currentTimeMillis()); entity.addEvent(event); entities.addEntity(entity); WebResource r=resource(); ClientResponse response=r.path("ws").path("v1").path("timeline").queryParam("user.name","tester").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); entities=new TimelineEntities(); entity=new TimelineEntity(); entity.setEntityId("test id 6"); entity.setEntityType("test type 5"); entity.setStartTime(System.currentTimeMillis()); event=new TimelineEvent(); event.setEventType("event type 2"); event.setTimestamp(System.currentTimeMillis()); entity.addEvent(event); entities.addEntity(entity); r=resource(); response=r.path("ws").path("v1").path("timeline").queryParam("user.name","other").accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).post(ClientResponse.class,entities); response=r.path("ws").path("v1").path("timeline").path("test type 5").path("events").queryParam("user.name","other").queryParam("entityId","test id 5,test id 6").accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON_TYPE,response.getType()); TimelineEvents events=response.getEntity(TimelineEvents.class); assertEquals(1,events.getAllEvents().size()); assertEquals("test id 6",events.getAllEvents().get(0).getEntityId()); } finally { timelineACLsManager.setAdminACLsManager(oldAdminACLsManager); } }

    InternalCallVerifier EqualityVerifier 
    /**
     * An integer-valued primary filter (appname:Integer.MAX_VALUE) matches the
     * fixture entities.
     */
    @Test
    public void testPrimaryFilterInteger() {
        WebResource webResource = resource();
        String filterValue = "appname:" + Integer.toString(Integer.MAX_VALUE);
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1")
            .queryParam("primaryFilter", filterValue)
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        verifyEntities(response.getEntity(TimelineEntities.class));
    }

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Fetching id_1 with default fields returns the fully-populated entity:
     * start time 123, two events, four primary filters, four other-info keys.
     */
    @Test
    public void testGetEntity() throws Exception {
        WebResource webResource = resource();
        ClientResponse response = webResource
            .path("ws").path("v1").path("timeline").path("type_1").path("id_1")
            .accept(MediaType.APPLICATION_JSON)
            .get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        TimelineEntity entity = response.getEntity(TimelineEntity.class);
        Assert.assertNotNull(entity);
        Assert.assertEquals("id_1", entity.getEntityId());
        Assert.assertEquals("type_1", entity.getEntityType());
        Assert.assertEquals(123l, entity.getStartTime().longValue());
        Assert.assertEquals(2, entity.getEvents().size());
        Assert.assertEquals(4, entity.getPrimaryFilters().size());
        Assert.assertEquals(4, entity.getOtherInfo().size());
    }

    Class: org.apache.hadoop.yarn.server.timeline.webapp.TestTimelineWebServicesWithSSL

    InternalCallVerifier BooleanVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Verifies that the timeline client puts an entity over HTTPS (the client
     * response URL contains "https") with no errors, and that the entity read
     * back from the store matches the one that was put. The client is stopped
     * and closed in finally even if assertions fail.
     */
    @Test public void testPutEntities() throws Exception { TestTimelineClient client=new TestTimelineClient(); try { client.init(conf); client.start(); TimelineEntity expectedEntity=new TimelineEntity(); expectedEntity.setEntityType("test entity type"); expectedEntity.setEntityId("test entity id"); TimelineEvent event=new TimelineEvent(); event.setEventType("test event type"); event.setTimestamp(0L); expectedEntity.addEvent(event); TimelinePutResponse response=client.putEntities(expectedEntity); Assert.assertEquals(0,response.getErrors().size()); Assert.assertTrue(client.resp.toString().contains("https")); TimelineEntity actualEntity=store.getEntity(expectedEntity.getEntityId(),expectedEntity.getEntityType(),EnumSet.allOf(Field.class)); Assert.assertNotNull(actualEntity); Assert.assertEquals(expectedEntity.getEntityId(),actualEntity.getEntityId()); Assert.assertEquals(expectedEntity.getEntityType(),actualEntity.getEntityType()); } finally { client.stop(); client.close(); } }

    Class: org.apache.hadoop.yarn.server.webproxy.TestProxyUriUtils

    APIUtilityVerifier EqualityVerifier 
    /**
     * With a null original URI, getProxyUri falls back to the bare proxy path
     * for the application (trailing slash, no extra path or query).
     */
    @Test
    public void testGetProxyUriNull() throws Exception {
        URI originalUri = null;
        URI proxyUri = new URI("http://proxy.net:8080/");
        ApplicationId id = BuilderUtils.newApplicationId(6384623L, 5);
        URI expected = new URI("http://proxy.net:8080/proxy/application_6384623_0005/");
        assertEquals(expected, ProxyUriUtils.getProxyUri(originalUri, proxyUri, id));
    }

    EqualityVerifier 
    /**
     * getPathAndQuery keeps a pre-formatted query as-is when not approved, and
     * appends the proxyapproved=true marker when the approved flag is set.
     */
    @Test
    public void testGetPathAndQuery() {
        assertEquals("/proxy/application_6384623_0005/static/app?foo=bar",
            ProxyUriUtils.getPathAndQuery(
                BuilderUtils.newApplicationId(6384623L, 5),
                "/static/app", "?foo=bar", false));
        assertEquals(
            "/proxy/application_6384623_0005/static/app?foo=bar&bad=good&proxyapproved=true",
            ProxyUriUtils.getPathAndQuery(
                BuilderUtils.newApplicationId(6384623L, 5),
                "/static/app", "foo=bar&bad=good", true));
    }

    EqualityVerifier 
    /** getPath renders the proxy path with a zero-padded 4-digit attempt id. */
    @Test
    public void testGetPathApplicationId() {
        assertEquals("/proxy/application_100_0001",
            ProxyUriUtils.getPath(BuilderUtils.newApplicationId(100L, 1)));
        assertEquals("/proxy/application_6384623_0005",
            ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5)));
    }

    EqualityVerifier 
    /**
     * getPath(appId, path) handles null, absolute, root, and relative suffix
     * paths — a relative suffix is joined with a separator.
     */
    @Test
    public void testGetPathApplicationIdString() {
        assertEquals("/proxy/application_6384623_0005",
            ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5), null));
        assertEquals("/proxy/application_6384623_0005/static/app",
            ProxyUriUtils.getPath(
                BuilderUtils.newApplicationId(6384623L, 5), "/static/app"));
        assertEquals("/proxy/application_6384623_0005/",
            ProxyUriUtils.getPath(BuilderUtils.newApplicationId(6384623L, 5), "/"));
        assertEquals("/proxy/application_6384623_0005/some/path",
            ProxyUriUtils.getPath(
                BuilderUtils.newApplicationId(6384623L, 5), "some/path"));
    }

    APIUtilityVerifier EqualityVerifier 
    /**
     * getProxyUri rebases the original URI's path and query under the proxy's
     * per-application prefix.
     */
    @Test
    public void testGetProxyUri() throws Exception {
        URI originalUri = new URI("http://host.com/static/foo?bar=bar");
        URI proxyUri = new URI("http://proxy.net:8080/");
        ApplicationId id = BuilderUtils.newApplicationId(6384623L, 5);
        URI expected =
            new URI("http://proxy.net:8080/proxy/application_6384623_0005/static/foo?bar=bar");
        assertEquals(expected, ProxyUriUtils.getProxyUri(originalUri, proxyUri, id));
    }

    Class: org.apache.hadoop.yarn.server.webproxy.TestWebAppProxyServer

    APIUtilityVerifier EqualityVerifier 
    /** The proxy's default bind address uses YarnConfiguration.DEFAULT_PROXY_PORT. */
    @Test
    public void testBindAddress() {
        YarnConfiguration conf = new YarnConfiguration();
        InetSocketAddress defaultBindAddress = WebAppProxyServer.getBindAddress(conf);
        Assert.assertEquals("Web Proxy default bind address port is incorrect",
            YarnConfiguration.DEFAULT_PROXY_PORT, defaultBindAddress.getPort());
    }

    BranchVerifier InternalCallVerifier EqualityVerifier 
    /**
     * Verifies the proxy service lifecycle: INITED before start(), STARTED
     * after, and that the embedded WebAppProxy sub-service is bound to the
     * expected address.
     *
     * Fix: the assertEquals inside the loop had its expected and actual
     * arguments swapped (JUnit convention is expected first), which would
     * produce a misleading failure message.
     */
    @Test
    public void testStart() {
        assertEquals(STATE.INITED, webAppProxy.getServiceState());
        webAppProxy.start();
        for (Service service : webAppProxy.getServices()) {
            if (service instanceof WebAppProxy) {
                assertEquals(proxyAddress, ((WebAppProxy) service).getBindAddress());
            }
        }
        assertEquals(STATE.STARTED, webAppProxy.getServiceState());
    }

    Class: org.apache.hadoop.yarn.server.webproxy.TestWebAppProxyServlet

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    /**
     * End-to-end test of the web app proxy servlet: a malformed app id yields
     * HTTP 500; then for /proxy/application_00_0 the response varies with the
     * test fetcher's "answer" mode — OK with the "checked" cookie echoed,
     * NOT_FOUND without the cookie (answers 1 and 4, which appear to simulate
     * missing/invalid app reports — confirm against AppReportFetcherForTest),
     * the interstitial warning page when no cookie is sent (answer 2), and a
     * plain OK pass-through (answer 3). The proxy server is closed in finally.
     *
     * Fix: the declaration "String s" was broken across two source lines
     * (invalid Java); it is rejoined here.
     */
    @Test(timeout = 5000)
    public void testWebAppProxyServlet() throws Exception {
        Configuration configuration = new Configuration();
        configuration.set(YarnConfiguration.PROXY_ADDRESS, "localhost:9090");
        // Keep the embedded HTTP server's thread pool small for the test.
        configuration.setInt("hadoop.http.max.threads", 5);
        WebAppProxyServerForTest proxy = new WebAppProxyServerForTest();
        proxy.init(configuration);
        proxy.start();
        int proxyPort = proxy.proxy.proxyServer.getConnectorAddress(0).getPort();
        AppReportFetcherForTest appReportFetcher = proxy.proxy.appReportFetcher;
        try {
            // A malformed application id must produce HTTP 500.
            URL wrongUrl = new URL("http://localhost:" + proxyPort + "/proxy/app");
            HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl.openConnection();
            proxyConn.connect();
            assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,
                proxyConn.getResponseCode());
            // Default fetcher answer: OK, and the "checked" cookie is echoed back.
            URL url = new URL("http://localhost:" + proxyPort + "/proxy/application_00_0");
            proxyConn = (HttpURLConnection) url.openConnection();
            proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
            proxyConn.connect();
            assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
            assertTrue(isResponseCookiePresent(proxyConn,
                "checked_application_0_0000", "true"));
            // Answer 1: NOT_FOUND and the cookie must not be set.
            appReportFetcher.answer = 1;
            proxyConn = (HttpURLConnection) url.openConnection();
            proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
            proxyConn.connect();
            assertEquals(HttpURLConnection.HTTP_NOT_FOUND, proxyConn.getResponseCode());
            assertFalse(isResponseCookiePresent(proxyConn,
                "checked_application_0_0000", "true"));
            // Answer 4: also NOT_FOUND without the cookie.
            appReportFetcher.answer = 4;
            proxyConn = (HttpURLConnection) url.openConnection();
            proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
            proxyConn.connect();
            assertEquals(HttpURLConnection.HTTP_NOT_FOUND, proxyConn.getResponseCode());
            assertFalse(isResponseCookiePresent(proxyConn,
                "checked_application_0_0000", "true"));
            // Answer 2, no cookie sent: the interstitial warning page is served.
            appReportFetcher.answer = 2;
            proxyConn = (HttpURLConnection) url.openConnection();
            proxyConn.connect();
            assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
            String s = readInputStream(proxyConn.getInputStream());
            assertTrue(s.contains(
                "to continue to an Application Master web interface owned by"));
            assertTrue(s.contains("WARNING: The following page may not be safe!"));
            // Answer 3 with the checked cookie: request is proxied through, OK.
            appReportFetcher.answer = 3;
            proxyConn = (HttpURLConnection) url.openConnection();
            proxyConn.setRequestProperty("Cookie", "checked_application_0_0000=true");
            proxyConn.connect();
            assertEquals(HttpURLConnection.HTTP_OK, proxyConn.getResponseCode());
        } finally {
            proxy.close();
        }
    }

    APIUtilityVerifier NullVerifier EqualityVerifier HybridVerifier 
    /**
     * Test main method of WebAppProxyServer: starts the server via
     * startServer(conf) on localhost:9099, then polls up to 20 times (100 ms
     * apart) until the server accepts a connection; a request for a malformed
     * proxy path must return HTTP 500. The server is stopped in finally.
     * NOTE(review): setting counter=0 inside the try exits the retry loop on
     * the first successful connection.
     */
    @Test(timeout=5000) public void testWebAppProxyServerMainMethod() throws Exception { WebAppProxyServer mainServer=null; Configuration conf=new YarnConfiguration(); conf.set(YarnConfiguration.PROXY_ADDRESS,"localhost:9099"); try { mainServer=WebAppProxyServer.startServer(conf); int counter=20; URL wrongUrl=new URL("http://localhost:9099/proxy/app"); HttpURLConnection proxyConn=null; while (counter > 0) { counter--; try { proxyConn=(HttpURLConnection)wrongUrl.openConnection(); proxyConn.connect(); proxyConn.getResponseCode(); counter=0; } catch ( Exception e) { Thread.sleep(100); } } assertNotNull(proxyConn); assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR,proxyConn.getResponseCode()); } finally { if (mainServer != null) { mainServer.stop(); } } }

    Class: org.apache.hadoop.yarn.server.webproxy.amfilter.TestAmFilter

    UtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    /** * Test AmIpFilter */ @Test(timeout=1000) @SuppressWarnings("deprecation") public void testFilter() throws Exception { Map params=new HashMap(); params.put(AmIpFilter.PROXY_HOST,proxyHost); params.put(AmIpFilter.PROXY_URI_BASE,proxyUri); FilterConfig config=new DummyFilterConfig(params); FilterChain chain=new FilterChain(){ @Override public void doFilter( ServletRequest servletRequest, ServletResponse servletResponse) throws IOException, ServletException { doFilterRequest=servletRequest.getClass().getName(); if (servletRequest instanceof AmIpServletRequestWrapper) { servletWrapper=(AmIpServletRequestWrapper)servletRequest; } } } ; AmIpFilter testFilter=new AmIpFilter(); testFilter.init(config); HttpServletResponseForTest response=new HttpServletResponseForTest(); ServletRequest failRequest=Mockito.mock(ServletRequest.class); try { testFilter.doFilter(failRequest,response,chain); fail(); } catch ( ServletException e) { assertEquals("This filter only works for HTTP/HTTPS",e.getMessage()); } HttpServletRequest request=Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteAddr()).thenReturn("redirect"); Mockito.when(request.getRequestURI()).thenReturn("/redirect"); testFilter.doFilter(request,response,chain); assertEquals("http://bogus/redirect",response.getRedirect()); Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1"); testFilter.doFilter(request,response,chain); assertTrue(doFilterRequest.contains("javax.servlet.http.HttpServletRequest")); Cookie[] cookies=new Cookie[1]; cookies[0]=new Cookie(WebAppProxyServlet.PROXY_USER_COOKIE_NAME,"user"); Mockito.when(request.getCookies()).thenReturn(cookies); testFilter.doFilter(request,response,chain); assertEquals("org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpServletRequestWrapper",doFilterRequest); assertEquals("user",servletWrapper.getUserPrincipal().getName()); assertEquals("user",servletWrapper.getRemoteUser()); assertFalse(servletWrapper.isUserInRole("")); }

    Class: org.apache.hadoop.yarn.server.webproxy.amfilter.TestAmFilterInitializer

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Exercises WebAppUtils.getProxyHostsAndPortsForAmFilter under several
    // configurations: default (resolved RM web app URL), explicit
    // PROXY_ADDRESS (wins even with HA RMs configured), single
    // RM_WEBAPP_ADDRESS, HA with three per-RM HTTP addresses (RM ids with no
    // matching address are skipped), and HA with HTTPS_ONLY policy where the
    // per-RM HTTPS addresses are used instead.
    @Test public void testGetProxyHostsAndPortsForAmFilter(){ Configuration conf=new Configuration(false); List proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf); assertEquals(1,proxyHosts.size()); assertEquals(WebAppUtils.getResolvedRMWebAppURLWithoutScheme(conf),proxyHosts.get(0)); conf=new Configuration(false); conf.set(YarnConfiguration.PROXY_ADDRESS,"host1:1000"); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000"); proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf); assertEquals(1,proxyHosts.size()); assertEquals("host1:1000",proxyHosts.get(0)); conf=new Configuration(false); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"host2:2000"); proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf); assertEquals(1,proxyHosts.size()); Collections.sort(proxyHosts); assertEquals("host2:2000",proxyHosts.get(0)); conf=new Configuration(false); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm4","dummy"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1","host5:5000"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2","host6:6000"); proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf); assertEquals(3,proxyHosts.size()); Collections.sort(proxyHosts); assertEquals("host2:2000",proxyHosts.get(0)); assertEquals("host3:3000",proxyHosts.get(1)); assertEquals("host4:4000",proxyHosts.get(2)); conf=new Configuration(false); 
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,HttpConfig.Policy.HTTPS_ONLY.toString()); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3,dummy"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1","host5:5000"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2","host6:6000"); proxyHosts=WebAppUtils.getProxyHostsAndPortsForAmFilter(conf); assertEquals(2,proxyHosts.size()); Collections.sort(proxyHosts); assertEquals("host5:5000",proxyHosts.get(0)); assertEquals("host6:6000",proxyHosts.get(1)); }

    NullVerifier EqualityVerifier PublicFieldVerifier HybridVerifier 
    // Verifies AmFilterInitializer hands the filter container exactly two
    // parameters — PROXY_HOSTS (host only) and PROXY_URI_BASES (scheme +
    // host:port + "/foo" suffix added by MockAmFilterInitializer) — under
    // four configurations: explicit PROXY_ADDRESS, single RM_WEBAPP_ADDRESS,
    // HA with three HTTP addresses, and HA with HTTPS_ONLY (https bases).
    @Test public void testInitFilter(){ MockFilterContainer con=new MockFilterContainer(); Configuration conf=new Configuration(false); conf.set(YarnConfiguration.PROXY_ADDRESS,"host1:1000"); AmFilterInitializer afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); assertEquals("host1",con.givenParameters.get(AmIpFilter.PROXY_HOSTS)); assertEquals("http://host1:1000/foo",con.givenParameters.get(AmIpFilter.PROXY_URI_BASES)); con=new MockFilterContainer(); conf=new Configuration(false); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS,"host2:2000"); afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); assertEquals("host2",con.givenParameters.get(AmIpFilter.PROXY_HOSTS)); assertEquals("http://host2:2000/foo",con.givenParameters.get(AmIpFilter.PROXY_URI_BASES)); con=new MockFilterContainer(); conf=new Configuration(false); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2,rm3"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm1","host2:2000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm2","host3:3000"); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS + ".rm3","host4:4000"); afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); String[] proxyHosts=con.givenParameters.get(AmIpFilter.PROXY_HOSTS).split(AmIpFilter.PROXY_HOSTS_DELIMITER); assertEquals(3,proxyHosts.length); Arrays.sort(proxyHosts); assertEquals("host2",proxyHosts[0]); assertEquals("host3",proxyHosts[1]); assertEquals("host4",proxyHosts[2]); String[] proxyBases=con.givenParameters.get(AmIpFilter.PROXY_URI_BASES).split(AmIpFilter.PROXY_URI_BASES_DELIMITER); assertEquals(3,proxyBases.length); Arrays.sort(proxyBases); assertEquals("http://host2:2000/foo",proxyBases[0]); 
assertEquals("http://host3:3000/foo",proxyBases[1]); assertEquals("http://host4:4000/foo",proxyBases[2]); con=new MockFilterContainer(); conf=new Configuration(false); conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY,HttpConfig.Policy.HTTPS_ONLY.toString()); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED,true); conf.set(YarnConfiguration.RM_HA_IDS,"rm1,rm2"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1","host5:5000"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2","host6:6000"); afi=new MockAmFilterInitializer(); assertNull(con.givenParameters); afi.initFilter(con,conf); assertEquals(2,con.givenParameters.size()); proxyHosts=con.givenParameters.get(AmIpFilter.PROXY_HOSTS).split(AmIpFilter.PROXY_HOSTS_DELIMITER); assertEquals(2,proxyHosts.length); Arrays.sort(proxyHosts); assertEquals("host5",proxyHosts[0]); assertEquals("host6",proxyHosts[1]); proxyBases=con.givenParameters.get(AmIpFilter.PROXY_URI_BASES).split(AmIpFilter.PROXY_URI_BASES_DELIMITER); assertEquals(2,proxyBases.length); Arrays.sort(proxyBases); assertEquals("https://host5:5000/foo",proxyBases[0]); assertEquals("https://host6:6000/foo",proxyBases[1]); }

    Class: org.apache.hadoop.yarn.sls.appmaster.TestAMSimulator

    InternalCallVerifier NullVerifier EqualityVerifier HybridVerifier 
    @Test public void testAMSimulator() throws Exception { MockAMSimulator app=new MockAMSimulator(); List containers=new ArrayList(); app.init(1,1000,containers,rm,null,0,1000000l,"user1","default",false,"app1"); app.firstStep(); Assert.assertEquals(1,rm.getRMContext().getRMApps().size()); Assert.assertNotNull(rm.getRMContext().getRMApps().get(app.appId)); app.lastStep(); }

    Class: org.apache.hadoop.yarn.sls.nodemanager.TestNMSimulator

    InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testNMSimulator() throws Exception { NMSimulator node1=new NMSimulator(); node1.init("rack1/node1",GB * 10,10,0,1000,rm); node1.middleStep(); Assert.assertEquals(1,rm.getResourceScheduler().getNumClusterNodes()); Assert.assertEquals(GB * 10,rm.getResourceScheduler().getRootQueueMetrics().getAvailableMB()); Assert.assertEquals(10,rm.getResourceScheduler().getRootQueueMetrics().getAvailableVirtualCores()); ContainerId cId1=newContainerId(1,1,1); Container container1=Container.newInstance(cId1,null,null,Resources.createResource(GB,1),null,null); node1.addNewContainer(container1,100000l); Assert.assertTrue("Node1 should have one running container.",node1.getRunningContainers().containsKey(cId1)); ContainerId cId2=newContainerId(2,1,1); Container container2=Container.newInstance(cId2,null,null,Resources.createResource(GB,1),null,null); node1.addNewContainer(container2,-1l); Assert.assertTrue("Node1 should have one running AM container",node1.getAMContainers().contains(cId2)); node1.cleanupContainer(cId1); Assert.assertTrue("Container1 should be removed from Node1.",node1.getCompletedContainers().contains(cId1)); node1.cleanupContainer(cId2); Assert.assertFalse("Container2 should be removed from Node1.",node1.getAMContainers().contains(cId2)); }

    Class: org.apache.hadoop.yarn.sls.scheduler.TestTaskRunner

    BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testMultiTask() throws Exception { runner.start(); runner.schedule(new MultiTask(0,20,5)); MultiTask.latch.await(5000,TimeUnit.MILLISECONDS); Assert.assertTrue(MultiTask.first); Assert.assertEquals((20 - 0) / 5 - 2 + 1,MultiTask.middle); Assert.assertTrue(MultiTask.last); }

    Class: org.apache.hadoop.yarn.sls.utils.TestSLSUtils

    APIUtilityVerifier EqualityVerifier 
    @Test public void testGetRackHostname(){ String str="/rack1/node1"; String rackHostname[]=SLSUtils.getRackHostName(str); Assert.assertEquals(rackHostname[0],"rack1"); Assert.assertEquals(rackHostname[1],"node1"); }

    Class: org.apache.hadoop.yarn.util.TestConverterUtils

    APIUtilityVerifier EqualityVerifier 
    @Test public void testConvertUrlWithNoPort() throws URISyntaxException { Path expectedPath=new Path("hdfs://foo.com"); URL url=ConverterUtils.getYarnUrlFromPath(expectedPath); Path actualPath=ConverterUtils.getPathFromYarnURL(url); assertEquals(expectedPath,actualPath); }

    APIUtilityVerifier EqualityVerifier 
    @Test public void testConvertUrlWithUserinfo() throws URISyntaxException { Path expectedPath=new Path("foo://username:password@example.com:8042"); URL url=ConverterUtils.getYarnUrlFromPath(expectedPath); Path actualPath=ConverterUtils.getPathFromYarnURL(url); assertEquals(expectedPath,actualPath); }

    APIUtilityVerifier EqualityVerifier 
    @Test public void testContainerId() throws URISyntaxException { ContainerId id=TestContainerId.newContainerId(0,0,0,0); String cid=ConverterUtils.toString(id); assertEquals("container_0_0000_00_000000",cid); ContainerId gen=ConverterUtils.toContainerId(cid); assertEquals(gen,id); }

    Class: org.apache.hadoop.yarn.util.TestFSDownload

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test(timeout=10000) public void testUniqueDestinationPath() throws Exception { Configuration conf=new Configuration(); FileContext files=FileContext.getLocalFSFileContext(conf); final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName())); files.mkdir(basedir,null,true); conf.setStrings(TestFSDownload.class.getName(),basedir.toString()); ExecutorService singleThreadedExec=Executors.newSingleThreadExecutor(); LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName()); Path destPath=dirs.getLocalPathForWrite(basedir.toString(),conf); destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet())); Path p=new Path(basedir,"dir" + 0 + ".jar"); LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE; LocalResource rsrc=createJar(files,p,vis); FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc); Future rPath=singleThreadedExec.submit(fsd); singleThreadedExec.shutdown(); while (!singleThreadedExec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ; Assert.assertTrue(rPath.isDone()); Assert.assertEquals(destPath,rPath.get().getParent()); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    // Localizes 10 randomly-sized files (alternating PRIVATE/APPLICATION
    // visibility) via a single-threaded executor with umask 077, then checks
    // each localized file's size matches, the cache directory is 0755, and
    // every private file carries FSDownload.PRIVATE_FILE_PERMS.
    @Test(timeout=10000) public void testDownload() throws IOException, URISyntaxException, InterruptedException { Configuration conf=new Configuration(); conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,"077"); FileContext files=FileContext.getLocalFSFileContext(conf); final Path basedir=files.makeQualified(new Path("target",TestFSDownload.class.getSimpleName())); files.mkdir(basedir,null,true); conf.setStrings(TestFSDownload.class.getName(),basedir.toString()); Map rsrcVis=new HashMap(); Random rand=new Random(); long sharedSeed=rand.nextLong(); rand.setSeed(sharedSeed); System.out.println("SEED: " + sharedSeed); Map> pending=new HashMap>(); ExecutorService exec=Executors.newSingleThreadExecutor(); LocalDirAllocator dirs=new LocalDirAllocator(TestFSDownload.class.getName()); int[] sizes=new int[10]; for (int i=0; i < 10; ++i) { sizes[i]=rand.nextInt(512) + 512; LocalResourceVisibility vis=LocalResourceVisibility.PRIVATE; if (i % 2 == 1) { vis=LocalResourceVisibility.APPLICATION; } Path p=new Path(basedir,"" + i); LocalResource rsrc=createFile(files,p,sizes[i],rand,vis); rsrcVis.put(rsrc,vis); Path destPath=dirs.getLocalPathForWrite(basedir.toString(),sizes[i],conf); destPath=new Path(destPath,Long.toString(uniqueNumberGenerator.incrementAndGet())); FSDownload fsd=new FSDownload(files,UserGroupInformation.getCurrentUser(),conf,destPath,rsrc); pending.put(rsrc,exec.submit(fsd)); } exec.shutdown(); while (!exec.awaitTermination(1000,TimeUnit.MILLISECONDS)) ; for ( Future path : pending.values()) { Assert.assertTrue(path.isDone()); } try { for ( Map.Entry> p : pending.entrySet()) { Path localized=p.getValue().get(); assertEquals(sizes[Integer.valueOf(localized.getName())],p.getKey().getSize()); FileStatus status=files.getFileStatus(localized.getParent()); FsPermission perm=status.getPermission(); assertEquals("Cache directory permissions are incorrect",new FsPermission((short)0755),perm); status=files.getFileStatus(localized); perm=status.getPermission(); 
System.out.println("File permission " + perm + " for rsrc vis "+ p.getKey().getVisibility().name()); assert (rsrcVis.containsKey(p.getKey())); Assert.assertTrue("Private file should be 500",perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort()); } } catch ( ExecutionException e) { throw new IOException("Failed exec",e); } }

    Class: org.apache.hadoop.yarn.util.TestLinuxResourceCalculatorPlugin

    InternalCallVerifier EqualityVerifier 
    /** * Test parsing /proc/stat and /proc/cpuinfo * @throws IOException */ @Test public void parsingProcStatAndCpuFile() throws IOException { long numProcessors=8; long cpuFrequencyKHz=2392781; String fileContent=""; for (int i=0; i < numProcessors; i++) { fileContent+=String.format(CPUINFO_FORMAT,i,cpuFrequencyKHz / 1000D) + "\n"; } File tempFile=new File(FAKE_CPUFILE); tempFile.deleteOnExit(); FileWriter fWriter=new FileWriter(FAKE_CPUFILE); fWriter.write(fileContent); fWriter.close(); assertEquals(plugin.getNumProcessors(),numProcessors); assertEquals(plugin.getCpuFrequency(),cpuFrequencyKHz); long uTime=54972994; long nTime=188860; long sTime=19803373; tempFile=new File(FAKE_STATFILE); tempFile.deleteOnExit(); updateStatFile(uTime,nTime,sTime); assertEquals(plugin.getCumulativeCpuTime(),FAKE_JIFFY_LENGTH * (uTime + nTime + sTime)); assertEquals(plugin.getCpuUsage(),(float)(LinuxResourceCalculatorPlugin.UNAVAILABLE),0.0); uTime+=100L; plugin.advanceTime(200L); updateStatFile(uTime,nTime,sTime); assertEquals(plugin.getCumulativeCpuTime(),FAKE_JIFFY_LENGTH * (uTime + nTime + sTime)); assertEquals(plugin.getCpuUsage(),6.25F,0.0); uTime+=600L; plugin.advanceTime(300L); updateStatFile(uTime,nTime,sTime); assertEquals(plugin.getCpuUsage(),25F,0.0); uTime+=1L; plugin.advanceTime(1L); updateStatFile(uTime,nTime,sTime); assertEquals(plugin.getCumulativeCpuTime(),FAKE_JIFFY_LENGTH * (uTime + nTime + sTime)); assertEquals(plugin.getCpuUsage(),25F,0.0); }

    InternalCallVerifier EqualityVerifier 
    /** * Test parsing /proc/meminfo * @throws IOException */ @Test public void parsingProcMemFile() throws IOException { long memTotal=4058864L; long memFree=99632L; long inactive=567732L; long swapTotal=2096472L; long swapFree=1818480L; File tempFile=new File(FAKE_MEMFILE); tempFile.deleteOnExit(); FileWriter fWriter=new FileWriter(FAKE_MEMFILE); fWriter.write(String.format(MEMINFO_FORMAT,memTotal,memFree,inactive,swapTotal,swapFree)); fWriter.close(); assertEquals(plugin.getAvailablePhysicalMemorySize(),1024L * (memFree + inactive)); assertEquals(plugin.getAvailableVirtualMemorySize(),1024L * (memFree + inactive + swapFree)); assertEquals(plugin.getPhysicalMemorySize(),1024L * memTotal); assertEquals(plugin.getVirtualMemorySize(),1024L * (memTotal + swapTotal)); }

    Class: org.apache.hadoop.yarn.util.TestProcfsBasedProcessTree

    APIUtilityVerifier InternalCallVerifier EqualityVerifier 
    // Builds a fake procfs with four processes (pid 100 roots the tree; pid
    // 400 is outside it) and checks cumulative vmem (600000), rss (600
    // pages, or 300KB when smaps-based accounting is enabled), and cpu time
    // (7200 jiffies, rising to 9400 after the stat files are rewritten).
    /** * A basic test that creates a few process directories and writes stat files. * Verifies that the cpu time and memory is correctly computed. * @throws IOExceptionif there was a problem setting up the fake procfs directories or * files. */ @Test(timeout=30000) public void testCpuAndMemoryForProcessTree() throws IOException { String[] pids={"100","200","300","400"}; File procfsRootDir=new File(TEST_ROOT_DIR,"proc"); try { setupProcfsRootDir(procfsRootDir); setupPidDirs(procfsRootDir,pids); ProcessStatInfo[] procInfos=new ProcessStatInfo[4]; procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","1000","200"}); procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","2000","400"}); procInfos[2]=new ProcessStatInfo(new String[]{"300","proc3","200","100","100","300000","300","3000","600"}); procInfos[3]=new ProcessStatInfo(new String[]{"400","proc4","1","400","400","400000","400","4000","800"}); ProcessTreeSmapMemInfo[] memInfo=new ProcessTreeSmapMemInfo[4]; memInfo[0]=new ProcessTreeSmapMemInfo("100"); memInfo[1]=new ProcessTreeSmapMemInfo("200"); memInfo[2]=new ProcessTreeSmapMemInfo("300"); memInfo[3]=new ProcessTreeSmapMemInfo("400"); createMemoryMappingInfo(memInfo); writeStatFiles(procfsRootDir,pids,procInfos,memInfo); Configuration conf=new Configuration(); ProcfsBasedProcessTree processTree=createProcessTree("100",procfsRootDir.getAbsolutePath()); processTree.setConf(conf); processTree.updateProcessTree(); Assert.assertEquals("Cumulative virtual memory does not match",600000L,processTree.getCumulativeVmem()); long cumuRssMem=ProcfsBasedProcessTree.PAGE_SIZE > 0 ? 600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L; Assert.assertEquals("Cumulative rss memory does not match",cumuRssMem,processTree.getCumulativeRssmem()); long cumuCpuTime=ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 
7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L; Assert.assertEquals("Cumulative cpu time does not match",cumuCpuTime,processTree.getCumulativeCpuTime()); setSmapsInProceTree(processTree,true); Assert.assertEquals("Cumulative rss memory does not match",(100 * KB_TO_BYTES * 3),processTree.getCumulativeRssmem()); procInfos[0]=new ProcessStatInfo(new String[]{"100","proc1","1","100","100","100000","100","2000","300"}); procInfos[1]=new ProcessStatInfo(new String[]{"200","proc2","100","100","100","200000","200","3000","500"}); writeStatFiles(procfsRootDir,pids,procInfos,memInfo); processTree.updateProcessTree(); cumuCpuTime=ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ? 9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L; Assert.assertEquals("Cumulative cpu time does not match",cumuCpuTime,processTree.getCumulativeCpuTime()); } finally { FileUtil.fullyDelete(procfsRootDir); } }

    Class: org.apache.hadoop.yarn.util.TestRackResolver

    APIUtilityVerifier EqualityVerifier 
    @Test public void testCaching(){ Configuration conf=new Configuration(); conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,MyResolver.class,DNSToSwitchMapping.class); RackResolver.init(conf); try { InetAddress iaddr=InetAddress.getByName("host1"); MyResolver.resolvedHost1=iaddr.getHostAddress(); } catch ( UnknownHostException e) { } Node node=RackResolver.resolve("host1"); Assert.assertEquals("/rack1",node.getNetworkLocation()); node=RackResolver.resolve("host1"); Assert.assertEquals("/rack1",node.getNetworkLocation()); node=RackResolver.resolve(invalidHost); Assert.assertEquals(NetworkTopology.DEFAULT_RACK,node.getNetworkLocation()); }

    Class: org.apache.hadoop.yarn.util.TestRackResolverScriptBasedMapping

    EqualityVerifier 
    @Test public void testScriptName(){ Configuration conf=new Configuration(); conf.setClass(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,ScriptBasedMapping.class,DNSToSwitchMapping.class); conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,"testScript"); RackResolver.init(conf); Assert.assertEquals(RackResolver.getDnsToSwitchMapping().toString(),"script-based mapping with script testScript"); }

    Class: org.apache.hadoop.yarn.util.TestTimes

    EqualityVerifier 
    @Test public void testPositiveStartandFinishTimes(){ long elapsed=Times.elapsed(5,10,true); Assert.assertEquals("Elapsed time is not 5",5,elapsed); elapsed=Times.elapsed(5,10,false); Assert.assertEquals("Elapsed time is not 5",5,elapsed); }

    EqualityVerifier 
    @Test public void testNegativeStartandFinishTimes(){ long elapsed=Times.elapsed(-5,-10,false); Assert.assertEquals("Elapsed time is not -1",-1,elapsed); }

    EqualityVerifier 
    @Test public void testNegativeStartTimes(){ long elapsed=Times.elapsed(-5,10,true); Assert.assertEquals("Elapsed time is not 0",0,elapsed); elapsed=Times.elapsed(-5,10,false); Assert.assertEquals("Elapsed time is not -1",-1,elapsed); }

    EqualityVerifier 
    @Test public void testFinishTimesAheadOfStartTimes(){ long elapsed=Times.elapsed(10,5,true); Assert.assertEquals("Elapsed time is not -1",-1,elapsed); elapsed=Times.elapsed(10,5,false); Assert.assertEquals("Elapsed time is not -1",-1,elapsed); elapsed=Times.elapsed(Long.MAX_VALUE,0,true); Assert.assertEquals("Elapsed time is not -1",-1,elapsed); }

    EqualityVerifier 
    @Test public void testNegativeFinishTimes(){ long elapsed=Times.elapsed(5,-10,false); Assert.assertEquals("Elapsed time is not -1",-1,elapsed); }

    Class: org.apache.hadoop.yarn.webapp.TestParseRoute

    EqualityVerifier 
    @Test public void testDefaultController(){ assertEquals(Arrays.asList("/","default","index"),WebApp.parseRoute("/")); }

    EqualityVerifier 
    @Test public void testMissingAction(){ assertEquals(Arrays.asList("/foo","foo","index",":a1"),WebApp.parseRoute("/foo/:a1")); }

    EqualityVerifier 
    @Test public void testPartialCapture2(){ assertEquals(Arrays.asList("/foo/action","foo","action",":a1","bar",":a2",":a3"),WebApp.parseRoute("/foo/action/:a1/bar/:a2/:a3")); }

    EqualityVerifier 
    @Test public void testTrailingPaddings(){ assertEquals(Arrays.asList("/foo/action","foo","action",":a"),WebApp.parseRoute("/foo/action//:a / ")); assertEquals(Arrays.asList("/foo/action","foo","action"),WebApp.parseRoute("/foo/action / ")); }

    EqualityVerifier 
    @Test public void testDefaultAction(){ assertEquals(Arrays.asList("/foo","foo","index"),WebApp.parseRoute("/foo")); assertEquals(Arrays.asList("/foo","foo","index"),WebApp.parseRoute("/foo/")); }

    EqualityVerifier 
    @Test public void testLeadingPaddings(){ assertEquals(Arrays.asList("/foo/action","foo","action",":a"),WebApp.parseRoute(" /foo/action/ :a")); }

    EqualityVerifier 
    @Test public void testDefaultCapture(){ assertEquals(Arrays.asList("/","default","index",":a"),WebApp.parseRoute("/:a")); }

    EqualityVerifier 
    @Test public void testPartialCapture1(){ assertEquals(Arrays.asList("/foo/action/bar","foo","action","bar",":a"),WebApp.parseRoute("/foo/action/bar/:a")); }

    EqualityVerifier 
    @Test public void testNormalAction(){ assertEquals(Arrays.asList("/foo/action","foo","action",":a1",":a2"),WebApp.parseRoute("/foo/action/:a1/:a2")); }

    Class: org.apache.hadoop.yarn.webapp.TestWebApp

    InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
    @Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class) public void testCreateWithNonZeroPort(){ WebApp app=WebApps.$for(this).at(50000).start(); int port=app.getListenerAddress().getPort(); assertEquals(50000,port); WebApp app2=WebApps.$for(this).at(50000).start(); app.stop(); app2.stop(); }

    APIUtilityVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testCreateWithPort(){ WebApp app=WebApps.$for(this).at(0).start(); int port=app.getListenerAddress().getPort(); assertTrue(port > 0); app.stop(); app=WebApps.$for(this).at(port).start(); assertEquals(port,app.getListenerAddress().getPort()); app.stop(); }

    APIUtilityVerifier EqualityVerifier 
    @Test public void testDefaultRoutes() throws Exception { WebApp app=WebApps.$for("test",this).start(); String baseUrl=baseUrl(app); try { assertEquals("foo",getContent(baseUrl + "test/foo").trim()); assertEquals("foo",getContent(baseUrl + "test/foo/index").trim()); assertEquals("bar",getContent(baseUrl + "test/foo/bar").trim()); assertEquals("default",getContent(baseUrl + "test").trim()); assertEquals("default",getContent(baseUrl + "test/").trim()); assertEquals("default",getContent(baseUrl).trim()); } finally { app.stop(); } }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testCustomRoutes() throws Exception { WebApp app=WebApps.$for("test",TestWebApp.class,this,"ws").start(new WebApp(){ @Override public void setup(){ bind(MyTestJAXBContextResolver.class); bind(MyTestWebService.class); route("/:foo",FooController.class); route("/bar/foo",FooController.class,"bar"); route("/foo/:foo",DefaultController.class); route("/foo/bar/:foo",DefaultController.class,"index"); } } ); String baseUrl=baseUrl(app); try { assertEquals("foo",getContent(baseUrl).trim()); assertEquals("foo",getContent(baseUrl + "test").trim()); assertEquals("foo1",getContent(baseUrl + "test/1").trim()); assertEquals("bar",getContent(baseUrl + "test/bar/foo").trim()); assertEquals("default",getContent(baseUrl + "test/foo/bar").trim()); assertEquals("default1",getContent(baseUrl + "test/foo/1").trim()); assertEquals("default2",getContent(baseUrl + "test/foo/bar/2").trim()); assertEquals(404,getResponseCode(baseUrl + "test/goo")); assertEquals(200,getResponseCode(baseUrl + "ws/v1/test")); assertTrue(getContent(baseUrl + "ws/v1/test").contains("myInfo")); } finally { app.stop(); } }

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testServePathsNoName(){ WebApp app=WebApps.$for("",this).start(); assertEquals("/",app.getRedirectPath()); String[] expectedPaths={"/*"}; String[] pathSpecs=app.getServePathSpecs(); assertEquals(1,pathSpecs.length); for (int i=0; i < expectedPaths.length; i++) { assertTrue(ArrayUtils.contains(pathSpecs,expectedPaths[i])); } app.stop(); }

    APIUtilityVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testYARNWebAppContext() throws Exception { System.setProperty("hadoop.log.dir","/Not/Existing/dir"); WebApp app=WebApps.$for("test",this).start(new WebApp(){ @Override public void setup(){ route("/",FooController.class); } } ); String baseUrl=baseUrl(app); try { assertFalse("foo".equals(getContent(baseUrl + "static").trim())); assertEquals(404,getResponseCode(baseUrl + "logs")); assertEquals("foo",getContent(baseUrl).trim()); } finally { app.stop(); } }

    InternalCallVerifier EqualityVerifier ExceptionVerifier HybridVerifier 
    @Test(expected=org.apache.hadoop.yarn.webapp.WebAppException.class) public void testCreateWithBindAddressNonZeroPort(){ WebApp app=WebApps.$for(this).at("0.0.0.0:50000").start(); int port=app.getListenerAddress().getPort(); assertEquals(50000,port); WebApp app2=WebApps.$for(this).at("0.0.0.0:50000").start(); app.stop(); app2.stop(); }

    IterativeVerifier InternalCallVerifier BooleanVerifier EqualityVerifier HybridVerifier 
    @Test public void testServePaths(){ WebApp app=WebApps.$for("test",this).start(); assertEquals("/test",app.getRedirectPath()); String[] expectedPaths={"/test","/test/*"}; String[] pathSpecs=app.getServePathSpecs(); assertEquals(2,pathSpecs.length); for (int i=0; i < expectedPaths.length; i++) { assertTrue(ArrayUtils.contains(pathSpecs,expectedPaths[i])); } app.stop(); }

    Class: org.apache.hadoop.yarn.webapp.hamlet.TestHamlet

    InternalCallVerifier EqualityVerifier 
    @Test public void testTable(){ Hamlet h=newHamlet().title("test table").link("style.css"); TABLE t=h.table("#id"); for (int i=0; i < 3; ++i) { t.tr().td("1").td("2")._(); } t._(); PrintWriter out=h.getWriter(); out.flush(); assertEquals(0,h.nestLevel); verify(out).print(""); verify(out,never()).print(""); verify(out,never()).print(""); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testEnumAttrs(){ Hamlet h=newHamlet().meta_http("Content-type","text/html; charset=utf-8").title("test enum attrs").link().$rel("stylesheet").$media(EnumSet.of(Media.screen,Media.print)).$type("text/css").$href("style.css")._().link().$rel(EnumSet.of(LinkType.index,LinkType.start)).$href("index.html")._(); h.div("#content")._("content")._(); PrintWriter out=h.getWriter(); out.flush(); assertEquals(0,h.nestLevel); verify(out).print(" media=\"screen, print\""); verify(out).print(" rel=\"start index\""); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testPreformatted(){ Hamlet h=newHamlet().div().i("inline before pre").pre()._("pre text1\npre text2").i("inline in pre")._("pre text after inline")._().i("inline after pre")._(); PrintWriter out=h.getWriter(); out.flush(); assertEquals(5,h.indents); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testScriptStyle(){ Hamlet h=newHamlet().script("a.js").script("b.js").style("h1 { font-size: 1.2em }"); PrintWriter out=h.getWriter(); out.flush(); assertEquals(0,h.nestLevel); verify(out,times(2)).print(" type=\"text/javascript\""); verify(out).print(" type=\"text/css\""); }

    InternalCallVerifier EqualityVerifier 
    @Test public void testSubViews(){ Hamlet h=newHamlet().title("test sub-views").div("#view1")._(TestView1.class)._().div("#view2")._(TestView2.class)._(); PrintWriter out=h.getWriter(); out.flush(); assertEquals(0,h.nestLevel); verify(out).print("[" + TestView1.class.getName() + "]"); verify(out).print("[" + TestView2.class.getName() + "]"); }

    InternalCallVerifier EqualityVerifier 
    // Exercises a small full-page Hamlet document (title, heading, paragraph
    // with inline b/em, footer div with a link) and verifies balanced nesting.
    // NOTE(review): the six verify(out).print("") expectations are empty and
    // the final verify(...).print(" literal is split across source lines —
    // this is not valid Java and looks like extraction damage; restore the
    // original expected-markup strings from upstream TestHamlet.
    @Test public void testHamlet(){ Hamlet h=newHamlet().title("test").h1("heading 1").p("#id.class").b("hello").em("world!")._().div("#footer")._("Brought to you by").a("http://hostname/","Somebody")._(); PrintWriter out=h.getWriter(); out.flush(); assertEquals(0,h.nestLevel); verify(out).print(""); verify(out).print(""); verify(out).print(""); verify(out).print(""); verify(out).print(""); verify(out).print(""); verify(out,never()).print("

    "); }

    Class: org.apache.hadoop.yarn.webapp.hamlet.TestHamletImpl

    InternalCallVerifier EqualityVerifier 
    /** * Test the generic implementation methods * @see TestHamlet for Hamlet syntax */ @Test public void testGeneric(){ PrintWriter out=spy(new PrintWriter(System.out)); HamletImpl hi=new HamletImpl(out,0,false); hi.root("start")._attr("name","value")._("start text").elem("sub")._attr("name","value")._("sub text")._().elem("sub1")._noEndTag()._attr("boolean",null)._("sub1text")._()._("start text2").elem("pre")._pre()._("pre text").elem("i")._inline()._("inline")._()._().elem("i")._inline()._("inline after pre")._()._("start text3").elem("sub2")._("sub2text")._().elem("sub3")._noEndTag()._("sub3text")._().elem("sub4")._noEndTag().elem("i")._inline()._("inline")._()._("sub4text")._()._(); out.flush(); assertEquals(0,hi.nestLevel); assertEquals(20,hi.indents); verify(out).print(""); verify(out,never()).print(""); verify(out,never()).print(""); verify(out,never()).print(""); }

    Class: org.apache.hadoop.yarn.webapp.hamlet.TestParseSelector

    NullVerifier EqualityVerifier HybridVerifier 
    /** A selector with only a class part yields a null id. */
    @Test
    public void testMissingId() {
      String[] parsed = parseSelector(".class");
      assertNull(parsed[S_ID]);
      assertEquals("class", parsed[S_CLASS]);
    }

    NullVerifier EqualityVerifier HybridVerifier 
    /** A selector with only an id part yields a null class. */
    @Test
    public void testMissingClass() {
      String[] parsed = parseSelector("#id");
      assertEquals("id", parsed[S_ID]);
      assertNull(parsed[S_CLASS]);
    }

    EqualityVerifier 
    /** Multiple class parts are joined with a single space. */
    @Test
    public void testMultiClass() {
      String[] parsed = parseSelector("#id.class1.class2");
      assertEquals("id", parsed[S_ID]);
      assertEquals("class1 class2", parsed[S_CLASS]);
    }

    EqualityVerifier 
    /** The common "#id.class" form parses into both components. */
    @Test
    public void testNormal() {
      String[] parsed = parseSelector("#id.class");
      assertEquals("id", parsed[S_ID]);
      assertEquals("class", parsed[S_CLASS]);
    }

    Class: org.apache.hadoop.yarn.webapp.util.TestWebAppUtils

    EqualityVerifier 
    /**
     * Checks that getPassword resolves each provisioned SSL alias from the
     * credential provider and returns null for an unknown alias.
     */
    @Test
    public void testGetPassword() throws Exception {
      Configuration sslConf = provisionCredentialsForSSL();
      Assert.assertEquals("keypass",
          WebAppUtils.getPassword(sslConf, WebAppUtils.WEB_APP_KEY_PASSWORD_KEY));
      Assert.assertEquals("storepass",
          WebAppUtils.getPassword(sslConf, WebAppUtils.WEB_APP_KEYSTORE_PASSWORD_KEY));
      Assert.assertEquals("trustpass",
          WebAppUtils.getPassword(sslConf, WebAppUtils.WEB_APP_TRUSTSTORE_PASSWORD_KEY));
      Assert.assertNull(WebAppUtils.getPassword(sslConf, "invalid-alias"));
    }

    APIUtilityVerifier EqualityVerifier 
    @Test public void testLoadSslConfiguration() throws Exception { Configuration conf=provisionCredentialsForSSL(); TestBuilder builder=(TestBuilder)new TestBuilder(); builder=(TestBuilder)WebAppUtils.loadSslConfiguration(builder,conf); String keypass="keypass"; String storepass="storepass"; String trustpass="trustpass"; assertEquals(keypass,((TestBuilder)builder).keypass); assertEquals(storepass,((TestBuilder)builder).keystorePassword); assertEquals(trustpass,((TestBuilder)builder).truststorePassword); }